skytools-3.2.6/0000755000000000000000000000000012426435645010312 5ustar skytools-3.2.6/source.cfg0000644000000000000000000000302512426435645012273 0ustar # what to include in source distribution # MANIFEST.in for Python Distutils include INSTALL COPYRIGHT README NEWS include Makefile config.mak.in configure.ac autogen.sh include configure config.guess config.sub install-sh include setup_skytools.py setup_pkgloader.py source.cfg include python/pkgloader.py include sql/common-pgxs.mk recursive-include sql *.sql Makefile *.out *.in *.[ch] README* *.ini *.templ *.control prune sql/*/results prune sql/*/docs recursive-include python/conf *.ini recursive-include misc *.sh *.rc *.py *.css Cindent recursive-include scripts *.py *.templ *.ini recursive-include debian changelog control control.in docs rules compat pgversions README.* recursive-include debian *.dirs *.docs *.install *.init.d *.manpages *.postinst *.prerm *.ini recursive-include doc Makefile *.py *.txt *.[1-9] include debian/copyright prune debian/postgresql-*-pgq3 prune debian/skytools3 prune debian/skytools3-walmgr prune debian/skytools3-ticker prune debian/python-skytools3 prune debian/python-pgq3 prune debian/skytools3 prune debian/skytools3 prune debian/tmp include python/skytools/installer_config.py.in prune python/skytools/installer_config.py recursive-include lib *.[chg] *.m4 *.mk *.h.in *.sh *-sh README COPYRIGHT prune lib/mk/temos exclude lib/usual/config.h prune lib/test prune lib/autogen.sh recursive-include upgrade *.sql Makefile recursive-include tests *.conf *.sh *.ini *.py Makefile data.sql install.sql *.sql *.conf prune fix*.sql prune tests/merge/conf prune tests/handler/conf prune tests/londiste/conf skytools-3.2.6/python/0000755000000000000000000000000012426435645011633 5ustar skytools-3.2.6/python/pgq/0000755000000000000000000000000012426435645012422 5ustar skytools-3.2.6/python/pgq/status.py0000644000000000000000000000643212426435645014324 0ustar """Status display. 
""" import sys, skytools __all__ = ['PGQStatus'] def ival(data, _as = None): "Format interval for output" if not _as: _as = data.split('.')[-1] numfmt = 'FM9999999' expr = "coalesce(to_char(extract(epoch from %s), '%s') || 's', 'NULL') as %s" return expr % (data, numfmt, _as) class PGQStatus(skytools.DBScript): """Info gathering and display.""" def __init__(self, args, check = 0): skytools.DBScript.__init__(self, 'pgqadm', args) self.show_status() sys.exit(0) def show_status(self): db = self.get_database("db", autocommit=1) cx = db.cursor() cx.execute("show server_version") pgver = cx.fetchone()[0] cx.execute("select pgq.version()") qver = cx.fetchone()[0] print("Postgres version: %s PgQ version: %s" % (pgver, qver)) q = """select f.queue_name, f.queue_ntables, %s, %s, %s, %s, q.queue_ticker_max_count, f.ev_per_sec, f.ev_new from pgq.get_queue_info() f, pgq.queue q where q.queue_name = f.queue_name""" % ( ival('f.queue_rotation_period'), ival('f.ticker_lag'), ival('q.queue_ticker_max_lag'), ival('q.queue_ticker_idle_period'), ) cx.execute(q) event_rows = cx.fetchall() q = """select queue_name, consumer_name, %s, %s, pending_events from pgq.get_consumer_info()""" % ( ival('lag'), ival('last_seen'), ) cx.execute(q) consumer_rows = cx.fetchall() print("\n%-33s %9s %13s %6s %6s %5s" % ('Event queue', 'Rotation', 'Ticker', 'TLag', 'EPS', 'New')) print('-' * 78) for ev_row in event_rows: tck = "%s/%s/%s" % (ev_row['queue_ticker_max_count'], ev_row['queue_ticker_max_lag'], ev_row['queue_ticker_idle_period']) rot = "%s/%s" % (ev_row['queue_ntables'], ev_row['queue_rotation_period']) print("%-33s %9s %13s %6s %6.1f %5d" % ( ev_row['queue_name'], rot, tck, ev_row['ticker_lag'], ev_row['ev_per_sec'], ev_row['ev_new'], )) print('-' * 78) print("\n%-48s %9s %9s %8s" % ( 'Consumer', 'Lag', 'LastSeen', 'Pending')) print('-' * 78) for ev_row in event_rows: cons = self.pick_consumers(ev_row, consumer_rows) self.show_queue(ev_row, cons) print('-' * 78) db.commit() def 
show_consumer(self, cons): print(" %-46s %9s %9s %8d" % ( cons['consumer_name'], cons['lag'], cons['last_seen'], cons['pending_events'])) def show_queue(self, ev_row, consumer_rows): print("%(queue_name)s:" % ev_row) for cons in consumer_rows: self.show_consumer(cons) def pick_consumers(self, ev_row, consumer_rows): res = [] for con in consumer_rows: if con['queue_name'] != ev_row['queue_name']: continue res.append(con) return res skytools-3.2.6/python/pgq/event.py0000644000000000000000000000370312426435645014120 0ustar """PgQ event container. """ __all__ = ['Event'] _fldmap = { 'ev_id': 'ev_id', 'ev_txid': 'ev_txid', 'ev_time': 'ev_time', 'ev_type': 'ev_type', 'ev_data': 'ev_data', 'ev_extra1': 'ev_extra1', 'ev_extra2': 'ev_extra2', 'ev_extra3': 'ev_extra3', 'ev_extra4': 'ev_extra4', 'ev_retry': 'ev_retry', 'id': 'ev_id', 'txid': 'ev_txid', 'time': 'ev_time', 'type': 'ev_type', 'data': 'ev_data', 'extra1': 'ev_extra1', 'extra2': 'ev_extra2', 'extra3': 'ev_extra3', 'extra4': 'ev_extra4', 'retry': 'ev_retry', } class Event(object): """Event data for consumers. Will be removed from the queue by default. 
""" __slots__ = ('_event_row', 'retry_time', 'queue_name') def __init__(self, queue_name, row): self._event_row = row self.retry_time = 60 self.queue_name = queue_name def __getattr__(self, key): return self._event_row[_fldmap[key]] # would be better in RetriableEvent only since we don't care but # unfortunately it needs to be defined here due to compatibility concerns def tag_done(self): pass # be also dict-like def __getitem__(self, k): return self._event_row.__getitem__(k) def __contains__(self, k): return self._event_row.__contains__(k) def get(self, k, d=None): return self._event_row.get(k, d) def has_key(self, k): return self._event_row.has_key(k) def keys(self): return self._event_row.keys() def values(self): return self._event_row.keys() def items(self): return self._event_row.items() def iterkeys(self): return self._event_row.iterkeys() def itervalues(self): return self._event_row.itervalues() def __str__(self): return "" % ( self.id, self.type, self.data, self.extra1, self.extra2, self.extra3, self.extra4) skytools-3.2.6/python/pgq/producer.py0000644000000000000000000000215112426435645014616 0ustar """PgQ producer helpers for Python. 
""" import skytools __all__ = ['bulk_insert_events', 'insert_event'] _fldmap = { 'id': 'ev_id', 'time': 'ev_time', 'type': 'ev_type', 'data': 'ev_data', 'extra1': 'ev_extra1', 'extra2': 'ev_extra2', 'extra3': 'ev_extra3', 'extra4': 'ev_extra4', 'ev_id': 'ev_id', 'ev_time': 'ev_time', 'ev_type': 'ev_type', 'ev_data': 'ev_data', 'ev_extra1': 'ev_extra1', 'ev_extra2': 'ev_extra2', 'ev_extra3': 'ev_extra3', 'ev_extra4': 'ev_extra4', } def bulk_insert_events(curs, rows, fields, queue_name): q = "select pgq.current_event_table(%s)" curs.execute(q, [queue_name]) tbl = curs.fetchone()[0] db_fields = map(_fldmap.get, fields) skytools.magic_insert(curs, tbl, rows, db_fields) def insert_event(curs, queue, ev_type, ev_data, extra1=None, extra2=None, extra3=None, extra4=None): q = "select pgq.insert_event(%s, %s, %s, %s, %s, %s, %s)" curs.execute(q, [queue, ev_type, ev_data, extra1, extra2, extra3, extra4]) return curs.fetchone()[0] skytools-3.2.6/python/pgq/baseconsumer.py0000644000000000000000000002543312426435645015471 0ustar """PgQ consumer framework for Python. todo: - pgq.next_batch_details() - tag_done() by default """ import sys, time, skytools from pgq.event import * __all__ = ['BaseConsumer', 'BaseBatchWalker'] class BaseBatchWalker(object): """Lazy iterator over batch events. Events are loaded using cursor. It will be given as ev_list to process_batch(). It allows: - one for loop over events - len() after that """ def __init__(self, curs, batch_id, queue_name, fetch_size = 300, consumer_filter = None): self.queue_name = queue_name self.fetch_size = fetch_size self.sql_cursor = "batch_walker" self.curs = curs self.length = 0 self.batch_id = batch_id self.fetch_status = 0 # 0-not started, 1-in-progress, 2-done self.consumer_filter = consumer_filter def _make_event(self, queue_name, row): return Event(queue_name, row) def __iter__(self): if self.fetch_status: raise Exception("BatchWalker: double fetch? 
(%d)" % self.fetch_status) self.fetch_status = 1 q = "select * from pgq.get_batch_cursor(%s, %s, %s, %s)" self.curs.execute(q, [self.batch_id, self.sql_cursor, self.fetch_size, self.consumer_filter]) # this will return first batch of rows q = "fetch %d from %s" % (self.fetch_size, self.sql_cursor) while 1: rows = self.curs.fetchall() if not len(rows): break self.length += len(rows) for row in rows: ev = self._make_event(self.queue_name, row) yield ev # if less rows than requested, it was final block if len(rows) < self.fetch_size: break # request next block of rows self.curs.execute(q) self.curs.execute("close %s" % self.sql_cursor) self.fetch_status = 2 def __len__(self): return self.length class BaseConsumer(skytools.DBScript): """Consumer base class. Do not subclass directly (use pgq.Consumer or pgq.LocalConsumer instead) Config template:: ## Parameters for pgq.Consumer ## # queue name to read from queue_name = # override consumer name #consumer_name = %(job_name)s # filter out only events for specific tables #table_filter = table1, table2 # whether to use cursor to fetch events (0 disables) #pgq_lazy_fetch = 300 # whether to read from source size in autocommmit mode # not compatible with pgq_lazy_fetch # the actual user script on top of pgq.Consumer must also support it #pgq_autocommit = 0 # whether to wait for specified number of events, # before assigning a batch (0 disables) #pgq_batch_collect_events = 0 # whether to wait specified amount of time, # before assigning a batch (postgres interval) #pgq_batch_collect_interval = # whether to stay behind queue top (postgres interval) #pgq_keep_lag = # in how many seconds to write keepalive stats for idle consumers # this stats is used for detecting that consumer is still running #keepalive_stats = 300 """ # by default, use cursor-based fetch default_lazy_fetch = 300 # should reader connection be used in autocommit mode pgq_autocommit = 0 # proper variables consumer_name = None queue_name = None # compat variables 
pgq_queue_name = None pgq_consumer_id = None pgq_lazy_fetch = None pgq_min_count = None pgq_min_interval = None pgq_min_lag = None batch_info = None consumer_filter = None keepalive_stats = None # statistics: time spent waiting for events idle_start = None _batch_walker_class = BaseBatchWalker def __init__(self, service_name, db_name, args): """Initialize new consumer. @param service_name: service_name for DBScript @param db_name: name of database for get_database() @param args: cmdline args for DBScript """ skytools.DBScript.__init__(self, service_name, args) self.db_name = db_name # compat params self.consumer_name = self.cf.get("pgq_consumer_id", '') self.queue_name = self.cf.get("pgq_queue_name", '') # proper params if not self.consumer_name: self.consumer_name = self.cf.get("consumer_name", self.job_name) if not self.queue_name: self.queue_name = self.cf.get("queue_name") self.stat_batch_start = 0 # compat vars self.pgq_queue_name = self.queue_name self.consumer_id = self.consumer_name # set default just once self.pgq_autocommit = self.cf.getint("pgq_autocommit", self.pgq_autocommit) if self.pgq_autocommit and self.pgq_lazy_fetch: raise skytools.UsageError("pgq_autocommit is not compatible with pgq_lazy_fetch") self.set_database_defaults(self.db_name, autocommit = self.pgq_autocommit) self.idle_start = time.time() def reload(self): skytools.DBScript.reload(self) self.pgq_lazy_fetch = self.cf.getint("pgq_lazy_fetch", self.default_lazy_fetch) # set following ones to None if not set self.pgq_min_count = self.cf.getint("pgq_batch_collect_events", 0) or None self.pgq_min_interval = self.cf.get("pgq_batch_collect_interval", '') or None self.pgq_min_lag = self.cf.get("pgq_keep_lag", '') or None # filter out specific tables only tfilt = [] for t in self.cf.getlist('table_filter', ''): tfilt.append(skytools.quote_literal(skytools.fq_name(t))) if len(tfilt) > 0: expr = "ev_extra1 in (%s)" % ','.join(tfilt) self.consumer_filter = expr self.keepalive_stats = 
self.cf.getint("keepalive_stats", 300) def startup(self): """Handle commands here. __init__ does not have error logging.""" if self.options.register: self.register_consumer() sys.exit(0) if self.options.unregister: self.unregister_consumer() sys.exit(0) return skytools.DBScript.startup(self) def init_optparse(self, parser = None): p = skytools.DBScript.init_optparse(self, parser) p.add_option('--register', action='store_true', help = 'register consumer on queue') p.add_option('--unregister', action='store_true', help = 'unregister consumer from queue') return p def process_event(self, db, event): """Process one event. Should be overridden by user code. """ raise Exception("needs to be implemented") def process_batch(self, db, batch_id, event_list): """Process all events in batch. By default calls process_event for each. Can be overridden by user code. """ for ev in event_list: self.process_event(db, ev) def work(self): """Do the work loop, once (internal). Returns: true if wants to be called again, false if script can sleep. 
""" db = self.get_database(self.db_name) curs = db.cursor() self.stat_start() # acquire batch batch_id = self._load_next_batch(curs) db.commit() if batch_id == None: return 0 # load events ev_list = self._load_batch_events(curs, batch_id) db.commit() # process events self._launch_process_batch(db, batch_id, ev_list) # done self._finish_batch(curs, batch_id, ev_list) db.commit() self.stat_end(len(ev_list)) return 1 def register_consumer(self): self.log.info("Registering consumer on source queue") db = self.get_database(self.db_name) cx = db.cursor() cx.execute("select pgq.register_consumer(%s, %s)", [self.queue_name, self.consumer_name]) res = cx.fetchone()[0] db.commit() return res def unregister_consumer(self): self.log.info("Unregistering consumer from source queue") db = self.get_database(self.db_name) cx = db.cursor() cx.execute("select pgq.unregister_consumer(%s, %s)", [self.queue_name, self.consumer_name]) db.commit() def _launch_process_batch(self, db, batch_id, list): self.process_batch(db, batch_id, list) def _make_event(self, queue_name, row): return Event(queue_name, row) def _load_batch_events_old(self, curs, batch_id): """Fetch all events for this batch.""" # load events sql = "select * from pgq.get_batch_events(%d)" % batch_id if self.consumer_filter is not None: sql += " where %s" % self.consumer_filter curs.execute(sql) rows = curs.fetchall() # map them to python objects ev_list = [] for r in rows: ev = self._make_event(self.queue_name, r) ev_list.append(ev) return ev_list def _load_batch_events(self, curs, batch_id): """Fetch all events for this batch.""" if self.pgq_lazy_fetch: return self._batch_walker_class(curs, batch_id, self.queue_name, self.pgq_lazy_fetch, self.consumer_filter) else: return self._load_batch_events_old(curs, batch_id) def _load_next_batch(self, curs): """Allocate next batch. 
(internal)""" q = """select * from pgq.next_batch_custom(%s, %s, %s, %s, %s)""" curs.execute(q, [self.queue_name, self.consumer_name, self.pgq_min_lag, self.pgq_min_count, self.pgq_min_interval]) inf = curs.fetchone().copy() inf['tick_id'] = inf['cur_tick_id'] inf['batch_end'] = inf['cur_tick_time'] inf['batch_start'] = inf['prev_tick_time'] inf['seq_start'] = inf['prev_tick_event_seq'] inf['seq_end'] = inf['cur_tick_event_seq'] self.batch_info = inf return self.batch_info['batch_id'] def _finish_batch(self, curs, batch_id, list): """Tag events and notify that the batch is done.""" curs.execute("select pgq.finish_batch(%s)", [batch_id]) def stat_start(self): t = time.time() self.stat_batch_start = t if self.stat_batch_start - self.idle_start > self.keepalive_stats: self.stat_put('idle', round(self.stat_batch_start - self.idle_start,4)) self.idle_start = t def stat_end(self, count): t = time.time() self.stat_put('count', count) self.stat_put('duration', round(t - self.stat_batch_start,4)) if count > 0: # reset timer if we got some events self.stat_put('idle', round(self.stat_batch_start - self.idle_start,4)) self.idle_start = t skytools-3.2.6/python/pgq/coopconsumer.py0000644000000000000000000000471712426435645015521 0ustar """PgQ cooperative consumer for Python. """ from pgq.consumer import Consumer __all__ = ['CoopConsumer'] class CoopConsumer(Consumer): """Cooperative Consumer base class. There will be one dbscript process per subconsumer. Config params:: ## pgq.CoopConsumer # name for subconsumer subconsumer_name = # pgsql interval when to consider parallel subconsumers dead, # and take over their unfinished batch #subconsumer_timeout = 1 hour """ def __init__(self, service_name, db_name, args): """Initialize new subconsumer. 
@param service_name: service_name for DBScript @param db_name: name of database for get_database() @param args: cmdline args for DBScript """ Consumer.__init__(self, service_name, db_name, args) self.subconsumer_name = self.cf.get("subconsumer_name") self.subconsumer_timeout = self.cf.get("subconsumer_timeout", "") def register_consumer(self): """Registration for subconsumer.""" self.log.info("Registering consumer on source queue") db = self.get_database(self.db_name) cx = db.cursor() cx.execute("select pgq_coop.register_subconsumer(%s, %s, %s)", [self.queue_name, self.consumer_name, self.subconsumer_name]) res = cx.fetchone()[0] db.commit() return res def unregister_consumer(self): """Unregistration for subconsumer.""" self.log.info("Unregistering consumer from source queue") db = self.get_database(self.db_name) cx = db.cursor() cx.execute("select pgq_coop.unregister_subconsumer(%s, %s, %s, 0)", [self.queue_name, self.consumer_name, self.subconsumer_name]) db.commit() def _load_next_batch(self, curs): """Allocate next batch. (internal)""" if self.subconsumer_timeout: q = "select pgq_coop.next_batch(%s, %s, %s, %s)" curs.execute(q, [self.queue_name, self.consumer_name, self.subconsumer_name, self.subconsumer_timeout]) else: q = "select pgq_coop.next_batch(%s, %s, %s)" curs.execute(q, [self.queue_name, self.consumer_name, self.subconsumer_name]) return curs.fetchone()[0] def _finish_batch(self, curs, batch_id, list): """Finish batch. (internal)""" self._flush_retry(curs, batch_id, list) curs.execute("select pgq_coop.finish_batch(%s)", [batch_id]) skytools-3.2.6/python/pgq/localconsumer.py0000644000000000000000000001610212426435645015642 0ustar """ Consumer that stores last applied position in local file. For cases where the consumer cannot use single database for remote tracking. To be subclassed, then override .process_local_batch() or .process_local_event() methods. 
""" import sys import os import errno import skytools from pgq.baseconsumer import BaseConsumer __all__ = ['LocalConsumer'] class LocalConsumer(BaseConsumer): """Consumer that applies batches sequentially in second database. Requirements: - Whole batch in one TX. - Must not use retry queue. Features: - Can detect if several batches are already applied to dest db. - If some ticks are lost, allows to seek back on queue. Whether it succeeds, depends on pgq configuration. Config options:: ## Parameters for LocalConsumer ## # file location where last applied tick is tracked local_tracking_file = ~/state/%(job_name)s.tick """ def reload(self): super(LocalConsumer, self).reload() self.local_tracking_file = self.cf.getfile('local_tracking_file') if not os.path.exists(os.path.dirname(self.local_tracking_file)): raise skytools.UsageError ("path does not exist: %s" % self.local_tracking_file) def init_optparse(self, parser = None): p = super(LocalConsumer, self).init_optparse(parser) p.add_option("--rewind", action = "store_true", help = "change queue position according to local tick") p.add_option("--reset", action = "store_true", help = "reset local tick based on queue position") return p def startup(self): if self.options.rewind: self.rewind() sys.exit(0) if self.options.reset: self.dst_reset() sys.exit(0) super(LocalConsumer, self).startup() self.check_queue() def check_queue(self): queue_tick = -1 local_tick = self.load_local_tick() db = self.get_database(self.db_name) curs = db.cursor() q = "select last_tick from pgq.get_consumer_info(%s, %s)" curs.execute(q, [self.queue_name, self.consumer_name]) rows = curs.fetchall() if len(rows) == 1: queue_tick = rows[0]['last_tick'] db.commit() if queue_tick < 0: if local_tick >= 0: self.log.info("Registering consumer at tick %d", local_tick) q = "select * from pgq.register_consumer_at(%s, %s, %s)" curs.execute(q, [self.queue_name, self.consumer_name, local_tick]) else: self.log.info("Registering consumer at queue top") q = 
"select * from pgq.register_consumer(%s, %s)" curs.execute(q, [self.queue_name, self.consumer_name]) elif local_tick < 0: self.log.info("Local tick missing, storing queue tick %d", queue_tick) self.save_local_tick(queue_tick) elif local_tick > queue_tick: self.log.warning("Tracking out of sync: queue=%d local=%d. Repositioning on queue. [Database failure?]", queue_tick, local_tick) q = "select * from pgq.register_consumer_at(%s, %s, %s)" curs.execute(q, [self.queue_name, self.consumer_name, local_tick]) elif local_tick < queue_tick: self.log.warning("Tracking out of sync: queue=%d local=%d. Rewinding queue. [Lost file data?]", queue_tick, local_tick) q = "select * from pgq.register_consumer_at(%s, %s, %s)" curs.execute(q, [self.queue_name, self.consumer_name, local_tick]) else: self.log.info("Ticks match: Queue=%d Local=%d", queue_tick, local_tick) def work(self): if self.work_state < 0: self.check_queue() return super(LocalConsumer, self).work() def process_batch(self, db, batch_id, event_list): """Process all events in batch. """ # check if done if self.is_batch_done(): return # actual work self.process_local_batch(db, batch_id, event_list) # finish work self.set_batch_done() def process_local_batch(self, db, batch_id, event_list): """Overridable method to process whole batch.""" for ev in event_list: self.process_local_event(db, batch_id, ev) def process_local_event(self, db, batch_id, ev): """Overridable method to process one event at a time.""" raise Exception('process_local_event not implemented') def is_batch_done(self): """Helper function to keep track of last successful batch in external database. """ local_tick = self.load_local_tick() cur_tick = self.batch_info['tick_id'] prev_tick = self.batch_info['prev_tick_id'] if local_tick < 0: # seems this consumer has not run yet? 
return False if prev_tick == local_tick: # on track return False if cur_tick == local_tick: # current batch is already applied, skip it return True # anything else means problems raise Exception('Lost position: batch %d..%d, dst has %d' % ( prev_tick, cur_tick, local_tick)) def set_batch_done(self): """Helper function to set last successful batch in external database. """ tick_id = self.batch_info['tick_id'] self.save_local_tick(tick_id) def register_consumer(self): new = super(LocalConsumer, self).register_consumer() if new: # fixme self.dst_reset() def unregister_consumer(self): """If unregistering, also clean completed tick table on dest.""" super(LocalConsumer, self).unregister_consumer() self.dst_reset() def rewind(self): dst_tick = self.load_local_tick() if dst_tick >= 0: src_db = self.get_database(self.db_name) src_curs = src_db.cursor() self.log.info("Rewinding queue to local tick %d", dst_tick) q = "select pgq.register_consumer_at(%s, %s, %s)" src_curs.execute(q, [self.queue_name, self.consumer_name, dst_tick]) src_db.commit() else: self.log.error('Cannot rewind, no tick found in local file') def dst_reset(self): self.log.info("Removing local tracking file") try: os.remove(self.local_tracking_file) except: pass def load_local_tick(self): """Reads stored tick or -1.""" try: f = open(self.local_tracking_file, 'r') buf = f.read() f.close() data = buf.strip() if data: tick_id = int(data) else: tick_id = -1 return tick_id except IOError, ex: if ex.errno == errno.ENOENT: return -1 raise def save_local_tick(self, tick_id): """Store tick in local file.""" data = str(tick_id) skytools.write_atomic(self.local_tracking_file, data) skytools-3.2.6/python/pgq/cascade/0000755000000000000000000000000012426435645014005 5ustar skytools-3.2.6/python/pgq/cascade/admin.py0000644000000000000000000015475012426435645015463 0ustar #! /usr/bin/env python ## NB: not all commands work ## """Cascaded queue administration. 
londiste.py INI pause [NODE [CONS]] setadm.py INI pause NODE [CONS] """ import optparse import os.path import Queue import sys import threading import time import skytools from skytools import UsageError, DBError from pgq.cascade.nodeinfo import * __all__ = ['CascadeAdmin'] RESURRECT_DUMP_FILE = "resurrect-lost-events.json" command_usage = """\ %prog [options] INI CMD [subcmd args] Node Initialization: create-root NAME [PUBLIC_CONNSTR] create-branch NAME [PUBLIC_CONNSTR] --provider= create-leaf NAME [PUBLIC_CONNSTR] --provider= All of the above initialize a node Node Administration: pause Pause node worker resume Resume node worker wait-root Wait until node has caught up with root wait-provider Wait until node has caught up with provider status Show cascade state node-status Show status of local node members Show members in set Cascade layout change: change-provider --provider NEW_NODE Change where worker reads from takeover FROM_NODE [--all] [--dead] Take other node position drop-node NAME Remove node from cascade tag-dead NODE .. Tag node as dead tag-alive NODE .. 
Tag node as alive """ standalone_usage = """ setadm extra switches: pause/resume/change-provider: --node=NODE_NAME | --consumer=CONSUMER_NAME create-root/create-branch/create-leaf: --worker=WORKER_NAME """ class CascadeAdmin(skytools.AdminScript): """Cascaded PgQ administration.""" queue_name = None queue_info = None extra_objs = [] local_node = None root_node_name = None commands_without_pidfile = ['status', 'node-status', 'node-info'] def __init__(self, svc_name, dbname, args, worker_setup = False): skytools.AdminScript.__init__(self, svc_name, args) self.initial_db_name = dbname if worker_setup: self.options.worker = self.job_name self.options.consumer = self.job_name def init_optparse(self, parser = None): """Add SetAdmin switches to parser.""" p = skytools.AdminScript.init_optparse(self, parser) usage = command_usage + standalone_usage p.set_usage(usage.strip()) g = optparse.OptionGroup(p, "actual queue admin options") g.add_option("--connstr", action="store_true", help = "initial connect string") g.add_option("--provider", help = "init: connect string for provider") g.add_option("--queue", help = "specify queue name") g.add_option("--worker", help = "create: specify worker name") g.add_option("--node", help = "specify node name") g.add_option("--consumer", help = "specify consumer name") g.add_option("--target", help = "takeover: specify node to take over") g.add_option("--merge", help = "create-node: combined queue name") g.add_option("--dead", action="append", help = "tag some node as dead") g.add_option("--dead-root", action="store_true", help = "tag some node as dead") g.add_option("--dead-branch", action="store_true", help = "tag some node as dead") g.add_option("--sync-watermark", help = "list of node names to sync with") p.add_option_group(g) return p def reload(self): """Reload config.""" skytools.AdminScript.reload(self) if self.options.queue: self.queue_name = self.options.queue else: self.queue_name = self.cf.get('queue_name', '') if not 
self.queue_name: self.queue_name = self.cf.get('pgq_queue_name', '') if not self.queue_name: raise Exception('"queue_name" not specified in config') # # Node initialization. # def cmd_install(self): db = self.get_database(self.initial_db_name) self.install_code(db) def cmd_create_root(self, *args): return self.create_node('root', args) def cmd_create_branch(self, *args): return self.create_node('branch', args) def cmd_create_leaf(self, *args): return self.create_node('leaf', args) def create_node(self, node_type, args): """Generic node init.""" if node_type not in ('root', 'branch', 'leaf'): raise Exception('unknown node type') # load node name if len(args) > 0: node_name = args[0] else: node_name = self.cf.get('node_name', '') if not node_name: raise UsageError('Node name must be given either in command line or config') # load node public location if len(args) > 1: node_location = args[1] else: node_location = self.cf.get('public_node_location', '') if not node_location: raise UsageError('Node public location must be given either in command line or config') if len(args) > 2: raise UsageError('Too many args, only node name and public connect string allowed') # load provider provider_loc = self.options.provider if not provider_loc: provider_loc = self.cf.get('initial_provider_location', '') # check if sane ok = 0 for k, v in skytools.parse_connect_string(node_location): if k in ('host', 'service'): ok = 1 break if not ok: self.log.warning('No host= in public connect string, bad idea') # connect to database db = self.get_database(self.initial_db_name) # check if code is installed self.install_code(db) # query current status res = self.exec_query(db, "select * from pgq_node.get_node_info(%s)", [self.queue_name]) info = res[0] if info['node_type'] is not None: self.log.info("Node is already initialized as %s", info['node_type']) return # check if public connstr is sane self.check_public_connstr(db, node_location) self.log.info("Initializing node") node_attrs = {} 
worker_name = self.options.worker if not worker_name: raise Exception('--worker required') combined_queue = self.options.merge if combined_queue and node_type != 'leaf': raise Exception('--merge can be used only for leafs') if self.options.sync_watermark: if node_type != 'branch': raise UsageError('--sync-watermark can be used only for branch nodes') node_attrs['sync_watermark'] = self.options.sync_watermark # register member if node_type == 'root': global_watermark = None combined_queue = None provider_name = None self.exec_cmd(db, "select * from pgq_node.register_location(%s, %s, %s, false)", [self.queue_name, node_name, node_location]) self.exec_cmd(db, "select * from pgq_node.create_node(%s, %s, %s, %s, %s, %s, %s)", [self.queue_name, node_type, node_name, worker_name, provider_name, global_watermark, combined_queue]) provider_db = None else: if not provider_loc: raise Exception('Please specify --provider') root_db = self.find_root_db(provider_loc) queue_info = self.load_queue_info(root_db) # check if member already exists if queue_info.get_member(node_name) is not None: self.log.error("Node '%s' already exists", node_name) sys.exit(1) combined_set = None provider_db = self.get_database('provider_db', connstr = provider_loc, profile = 'remote') q = "select node_type, node_name from pgq_node.get_node_info(%s)" res = self.exec_query(provider_db, q, [self.queue_name]) row = res[0] if not row['node_name']: raise Exception("provider node not found") provider_name = row['node_name'] # register member on root self.exec_cmd(root_db, "select * from pgq_node.register_location(%s, %s, %s, false)", [self.queue_name, node_name, node_location]) # lookup provider provider = queue_info.get_member(provider_name) if not provider: self.log.error("Node %s does not exist", provider_name) sys.exit(1) # register on provider self.exec_cmd(provider_db, "select * from pgq_node.register_location(%s, %s, %s, false)", [self.queue_name, node_name, node_location]) rows = 
self.exec_cmd(provider_db, "select * from pgq_node.register_subscriber(%s, %s, %s, null)", [self.queue_name, node_name, worker_name]) global_watermark = rows[0]['global_watermark'] # initialize node itself # insert members self.exec_cmd(db, "select * from pgq_node.register_location(%s, %s, %s, false)", [self.queue_name, node_name, node_location]) for m in queue_info.member_map.values(): self.exec_cmd(db, "select * from pgq_node.register_location(%s, %s, %s, %s)", [self.queue_name, m.name, m.location, m.dead]) # real init self.exec_cmd(db, "select * from pgq_node.create_node(%s, %s, %s, %s, %s, %s, %s)", [ self.queue_name, node_type, node_name, worker_name, provider_name, global_watermark, combined_queue ]) self.extra_init(node_type, db, provider_db) if node_attrs: s_attrs = skytools.db_urlencode(node_attrs) self.exec_cmd(db, "select * from pgq_node.set_node_attrs(%s, %s)", [self.queue_name, s_attrs]) self.log.info("Done") def check_public_connstr(self, db, pub_connstr): """Look if public and local connect strings point to same db's. 
""" pub_db = self.get_database("pub_db", connstr = pub_connstr, profile = 'remote') curs1 = db.cursor() curs2 = pub_db.cursor() q = "select oid, datname, txid_current() as txid, txid_current_snapshot() as snap"\ " from pg_catalog.pg_database where datname = current_database()" curs1.execute(q) res1 = curs1.fetchone() db.commit() curs2.execute(q) res2 = curs2.fetchone() pub_db.commit() curs1.execute(q) res3 = curs1.fetchone() db.commit() self.close_database("pub_db") failure = 0 if (res1['oid'], res1['datname']) != (res2['oid'], res2['datname']): failure += 1 sn1 = skytools.Snapshot(res1['snap']) tx = res2['txid'] sn2 = skytools.Snapshot(res3['snap']) if sn1.contains(tx): failure += 2 elif not sn2.contains(tx): failure += 4 if failure: raise UsageError("Public connect string points to different database than local connect string (fail=%d)" % failure) def extra_init(self, node_type, node_db, provider_db): """Callback to do specific init.""" pass def find_root_db(self, initial_loc = None): """Find root node, having start point.""" if initial_loc: loc = initial_loc db = self.get_database('root_db', connstr = loc, profile = 'remote') else: loc = self.cf.get(self.initial_db_name) db = self.get_database('root_db', connstr = loc) while 1: # query current status res = self.exec_query(db, "select * from pgq_node.get_node_info(%s)", [self.queue_name]) info = res[0] node_type = info['node_type'] if node_type is None: self.log.info("Root node not initialized?") sys.exit(1) self.log.debug("db='%s' -- type='%s' provider='%s'", loc, node_type, info['provider_location']) # configured db may not be root anymore, walk upwards then if node_type in ('root', 'combined-root'): db.commit() self.root_node_name = info['node_name'] return db self.close_database('root_db') if loc == info['provider_location']: raise Exception("find_root_db: got loop: %s" % loc) loc = info['provider_location'] if loc is None: self.log.error("Sub node provider not initialized?") sys.exit(1) db = 
self.get_database('root_db', connstr = loc, profile = 'remote') raise Exception('process canceled') def find_root_node(self): self.find_root_db() return self.root_node_name def find_consumer_check(self, node, consumer): cmap = self.get_node_consumer_map(node) return (consumer in cmap) def find_consumer(self, node = None, consumer = None): if not node and not consumer: node = self.options.node consumer = self.options.consumer if not node and not consumer: raise Exception('Need either --node or --consumer') # specific node given if node: if consumer: if not self.find_consumer_check(node, consumer): raise Exception('Consumer not found') else: state = self.get_node_info(node) consumer = state.worker_name return (node, consumer) # global consumer search if self.find_consumer_check(self.local_node, consumer): return (self.local_node, consumer) # fixme: dead node handling? nodelist = self.queue_info.member_map.keys() for node in nodelist: if node == self.local_node: continue if self.find_consumer_check(node, consumer): return (node, consumer) raise Exception('Consumer not found') def install_code(self, db): """Install cascading code to db.""" objs = [ skytools.DBLanguage("plpgsql"), #skytools.DBFunction("txid_current_snapshot", 0, sql_file="txid.sql"), skytools.DBSchema("pgq", sql_file="pgq.sql"), skytools.DBFunction("pgq.get_batch_cursor", 3, sql_file = "pgq.upgrade.2to3.sql"), skytools.DBSchema("pgq_ext", sql_file="pgq_ext.sql"), # not needed actually skytools.DBSchema("pgq_node", sql_file="pgq_node.sql"), ] objs += self.extra_objs skytools.db_install(db.cursor(), objs, self.log) db.commit() # # Print status of whole set. 
    #

    def cmd_status(self):
        """Show set status.

        Polls every member in parallel worker threads, then prints the tree.
        """
        self.load_local_info()

        # prepare data for workers
        members = Queue.Queue()
        for m in self.queue_info.member_map.itervalues():
            cstr = self.add_connect_string_profile(m.location, 'remote')
            members.put( (m.name, cstr) )
        nodes = Queue.Queue()

        # launch workers and wait; thread count scales with member count
        num_nodes = len(self.queue_info.member_map)
        num_threads = max (min (num_nodes / 4, 100), 1)
        tlist = []
        for i in range(num_threads):
            t = threading.Thread (target = self._cmd_status_worker, args = (members, nodes))
            t.daemon = True
            t.start()
            tlist.append(t)
        #members.join()
        for t in tlist:
            t.join()

        # drain collected results
        while True:
            try:
                node = nodes.get_nowait()
            except Queue.Empty:
                break
            self.queue_info.add_node(node)

        self.queue_info.print_tree()

    def _cmd_status_worker (self, members, nodes):
        # members in, nodes out, both thread-safe
        while True:
            try:
                node_name, node_connstr = members.get_nowait()
            except Queue.Empty:
                break
            node = self.load_node_status (node_name, node_connstr)
            nodes.put(node)
            members.task_done()

    def load_node_status (self, name, location):
        """ Load node info & status """
        # must be thread-safe (!)
        if not self.node_alive(name):
            # dead member: placeholder NodeInfo, no connection attempt
            node = NodeInfo(self.queue_name, None, node_name = name)
            return node
        try:
            db = None
            db = skytools.connect_database (location)
            db.set_isolation_level (skytools.I_AUTOCOMMIT)
            curs = db.cursor()
            curs.execute("select * from pgq_node.get_node_info(%s)", [self.queue_name])
            node = NodeInfo(self.queue_name, curs.fetchone())
            node.load_status(curs)
            self.load_extra_status(curs, node)
        except DBError, d:
            # degrade to a dead-node placeholder on connection/query failure
            msg = str(d).strip().split('\n', 1)[0].strip()
            print('Node %r failure: %s' % (name, msg))
            node = NodeInfo(self.queue_name, None, node_name = name)
        finally:
            if db: db.close()
        return node

    def cmd_node_status(self):
        """ Show status of a local node.
        """
        self.load_local_info()
        db = self.get_node_database(self.local_node)
        curs = db.cursor()
        node = self.queue_info.local_node
        node.load_status(curs)
        self.load_extra_status(curs, node)

        subscriber_nodes = self.get_node_subscriber_list(self.local_node)

        offset=4*' '
        print node.get_title()
        print offset+'Provider: %s' % node.provider_node
        print offset+'Subscribers: %s' % ', '.join(subscriber_nodes)
        for l in node.get_infolines():
            print offset+l

    def load_extra_status(self, curs, node):
        """Fetch extra info."""
        # must be thread-safe (!)
        pass

    #
    # Normal commands.
    #

    def cmd_change_provider(self):
        """Change node provider."""
        self.load_local_info()
        self.change_provider(
                node = self.options.node,
                consumer = self.options.consumer,
                new_provider = self.options.provider)

    def node_change_provider(self, node, new_provider):
        """Repoint a node's worker at a new provider."""
        self.change_provider(node, new_provider = new_provider)

    def change_provider(self, node = None, consumer = None, new_provider = None):
        """Move one consumer (or a node's worker) to another provider.

        Sequence: pause consumer -> register on new provider -> switch
        pointer on target -> resume -> best-effort unregister from old.
        """
        old_provider = None
        if not new_provider:
            raise Exception('Please give --provider')

        if not node or not consumer:
            node, consumer = self.find_consumer(node = node, consumer = consumer)

        if node == new_provider:
            raise UsageError ("cannot subscribe to itself")

        cmap = self.get_node_consumer_map(node)
        cinfo = cmap[consumer]
        old_provider = cinfo['provider_node']

        if old_provider == new_provider:
            self.log.info("Consumer '%s' at node '%s' has already '%s' as provider",
                          consumer, node, new_provider)
            return

        # pause target node
        self.pause_consumer(node, consumer)

        # reload node info
        node_db = self.get_node_database(node)
        qinfo = self.load_queue_info(node_db)
        ninfo = qinfo.local_node
        node_location = qinfo.get_member(node).location

        # reload consumer info (tick position may have moved while pausing)
        cmap = self.get_node_consumer_map(node)
        cinfo = cmap[consumer]

        # is it node worker or plain consumer?
        is_worker = (ninfo.worker_name == consumer)

        # fixme: expect the node to be described already
        q = "select * from pgq_node.register_location(%s, %s, %s, false)"
        self.node_cmd(new_provider, q, [self.queue_name, node, node_location])

        # subscribe on new provider -- workers use the cascaded subscriber
        # API, plain consumers the raw pgq registration
        if is_worker:
            q = 'select * from pgq_node.register_subscriber(%s, %s, %s, %s)'
            self.node_cmd(new_provider, q, [self.queue_name, node, consumer, cinfo['last_tick_id']])
        else:
            q = 'select * from pgq.register_consumer_at(%s, %s, %s)'
            self.node_cmd(new_provider, q, [self.queue_name, consumer, cinfo['last_tick_id']])

        # change provider on target node
        q = 'select * from pgq_node.change_consumer_provider(%s, %s, %s)'
        self.node_cmd(node, q, [self.queue_name, consumer, new_provider])

        # done
        self.resume_consumer(node, consumer)

        # unsubscribe from old provider (best-effort: old provider may be down)
        try:
            if is_worker:
                q = "select * from pgq_node.unregister_subscriber(%s, %s)"
                self.node_cmd(old_provider, q, [self.queue_name, node])
            else:
                q = "select * from pgq.unregister_consumer(%s, %s)"
                self.node_cmd(old_provider, q, [self.queue_name, consumer])
        except skytools.DBError, d:
            self.log.warning("failed to unregister from old provider (%s): %s", old_provider, str(d))

    def cmd_rename_node(self, old_name, new_name):
        """Rename node.

        Two-phase: step1 copies member/subscriber info under the new name on
        every involved node, step2 renames the node itself and drops copies.
        """

        self.load_local_info()

        root_db = self.find_root_db()

        # pause target node
        self.pause_node(old_name)
        node = self.load_node_info(old_name)
        provider_node = node.provider_node
        subscriber_list = self.get_node_subscriber_list(old_name)

        # create copy of member info / subscriber+queue info
        step1 = 'select * from pgq_node.rename_node_step1(%s, %s, %s)'
        # rename node itself, drop copies
        step2 = 'select * from pgq_node.rename_node_step2(%s, %s, %s)'

        # step1
        self.exec_cmd(root_db, step1, [self.queue_name, old_name, new_name])
        self.node_cmd(provider_node, step1, [self.queue_name, old_name, new_name])
        self.node_cmd(old_name, step1, [self.queue_name, old_name, new_name])
        for child in subscriber_list:
            self.node_cmd(child, step1, [self.queue_name, old_name, new_name])

        # step2
        self.node_cmd(old_name, step2, [self.queue_name, old_name, new_name])
        # NOTE(review): provider gets step1 here while every other node gets
        # step2 -- looks like a copy/paste bug, confirm against upstream.
        self.node_cmd(provider_node, step1, [self.queue_name, old_name, new_name])
        for child in subscriber_list:
            self.node_cmd(child, step2, [self.queue_name, old_name, new_name])
        self.exec_cmd(root_db, step2, [self.queue_name, old_name, new_name])

        # resume node
        self.resume_node(old_name)

    def cmd_drop_node(self, node_name):
        """Drop a node.

        Tries graceful unregistration first, then brute-force removal of the
        node's traces from every reachable member.
        """
        self.load_local_info()

        try:
            node = self.load_node_info(node_name)
            if node:
                # see if we can safely drop
                subscriber_list = self.get_node_subscriber_list(node_name)
                if subscriber_list:
                    raise UsageError('node still has subscribers')
        except skytools.DBError:
            pass

        try:
            # unregister node location from root node (event will be added to queue)
            if node and node.type == 'root':
                pass
            else:
                root_db = self.find_root_db()
                q = "select * from pgq_node.unregister_location(%s, %s)"
                self.exec_cmd(root_db, q, [self.queue_name, node_name])
        except skytools.DBError, d:
            self.log.warning("Unregister from root failed: %s", str(d))

        try:
            # drop node info
            db = self.get_node_database(node_name)
            q = "select * from pgq_node.drop_node(%s, %s)"
            self.exec_cmd(db, q, [self.queue_name, node_name])
        except skytools.DBError, d:
            self.log.warning("Local drop failure: %s", str(d))

        # brute force removal
        for n in self.queue_info.member_map.values():
            try:
                q = "select * from pgq_node.drop_node(%s, %s)"
                self.node_cmd(n.name, q, [self.queue_name, node_name])
            except skytools.DBError, d:
                self.log.warning("Failed to remove from '%s': %s", n.name, str(d))

    def node_depends(self, sub_node, top_node):
        """True if top_node is in sub_node's provider chain."""
        cur_node = sub_node
        # walk upstream
        while 1:
            info = self.get_node_info(cur_node)
            if cur_node == top_node:
                # yes, top_node is sub_node's provider
                return True
            if info.type == 'root':
                # found root, no dependancy
                return False
            # step upwards
            cur_node = info.provider_node

    def demote_node(self, oldnode, step, newnode):
        """Downgrade old root?"""
        q = "select * from pgq_node.demote_root(%s, %s, %s)"
        res = \
self.node_cmd(oldnode, q, [self.queue_name, step, newnode])
        if res:
            return res[0]['last_tick']

    def promote_branch(self, node):
        """Promote old branch as root."""
        q = "select * from pgq_node.promote_branch(%s)"
        self.node_cmd(node, q, [self.queue_name])

    def wait_for_catchup(self, new, last_tick):
        """Wait until new_node catches up to old_node."""
        # wait for it on subscriber
        info = self.load_node_info(new)
        if info.completed_tick >= last_tick:
            self.log.info('tick already exists')
            return info
        if info.paused:
            self.log.info('new node seems paused, resuming')
            self.resume_node(new)
        while 1:
            self.log.debug('waiting for catchup: need=%d, cur=%d', last_tick, info.completed_tick)
            time.sleep(1)
            info = self.load_node_info(new)
            if info.completed_tick >= last_tick:
                return info

    def takeover_root(self, old_node_name, new_node_name, failover = False):
        """Root switchover.

        If the old root still answers, demote it in lock-step; otherwise
        locate the most advanced surviving subscriber and catch up from it.
        """
        new_info = self.get_node_info(new_node_name)
        old_info = None

        if self.node_alive(old_node_name):
            # old root works, switch properly
            old_info = self.get_node_info(old_node_name)
            self.pause_node(old_node_name)
            self.demote_node(old_node_name, 1, new_node_name)
            last_tick = self.demote_node(old_node_name, 2, new_node_name)
            self.wait_for_catchup(new_node_name, last_tick)
        else:
            # find latest tick on local node
            q = "select * from pgq.get_queue_info(%s)"
            db = self.get_node_database(new_node_name)
            curs = db.cursor()
            curs.execute(q, [self.queue_name])
            row = curs.fetchone()
            last_tick = row['last_tick_id']
            db.commit()

            # find if any other node has more ticks
            other_node = None
            other_tick = last_tick
            sublist = self.find_subscribers_for(old_node_name)
            for n in sublist:
                q = "select * from pgq_node.get_node_info(%s)"
                rows = self.node_cmd(n, q, [self.queue_name])
                info = rows[0]
                if info['worker_last_tick'] > other_tick:
                    other_tick = info['worker_last_tick']
                    other_node = n

            # if yes, load batches from there
            if other_node:
                self.change_provider(new_node_name, new_provider = other_node)
                self.wait_for_catchup(new_node_name, other_tick)
                last_tick = other_tick

        # promote new root
        self.pause_node(new_node_name)
        self.promote_branch(new_node_name)

        # register old root on new root as subscriber
        if self.node_alive(old_node_name):
            old_worker_name = old_info.worker_name
        else:
            # dead root gets a placeholder "gravestone" consumer name
            old_worker_name = self.failover_consumer_name(old_node_name)
        q = 'select * from pgq_node.register_subscriber(%s, %s, %s, %s)'
        self.node_cmd(new_node_name, q, [self.queue_name, old_node_name, old_worker_name, last_tick])

        # unregister new root from old root
        q = "select * from pgq_node.unregister_subscriber(%s, %s)"
        self.node_cmd(new_info.provider_node, q, [self.queue_name, new_node_name])

        # launch new node
        self.resume_node(new_node_name)

        # demote & launch old node
        if self.node_alive(old_node_name):
            self.demote_node(old_node_name, 3, new_node_name)
            self.resume_node(old_node_name)

    def takeover_nonroot(self, old_node_name, new_node_name, failover):
        """Non-root switchover."""
        if self.node_depends(new_node_name, old_node_name):
            # yes, old_node is new_nodes provider,
            # switch it around
            pnode = self.find_provider(old_node_name)
            self.node_change_provider(new_node_name, pnode)

        self.node_change_provider(old_node_name, new_node_name)

    def cmd_takeover(self, old_node_name):
        """Generic node switchover."""
        self.log.info("old: %s", old_node_name)
        self.load_local_info()
        new_node_name = self.options.node
        if not new_node_name:
            # fall back: --consumer naming the local worker means "take over here"
            worker = self.options.consumer
            if not worker:
                raise UsageError('old node not given')
            if self.queue_info.local_node.worker_name != worker:
                raise UsageError('old node not given')
            new_node_name = self.local_node
        if not old_node_name:
            raise UsageError('old node not given')

        if old_node_name not in self.queue_info.member_map:
            raise UsageError('Unknown node: %s' % old_node_name)

        if self.options.dead_root:
            otype = 'root'
            failover = True
        elif self.options.dead_branch:
            otype = 'branch'
            failover = True
        else:
            onode = self.get_node_info(old_node_name)
            otype = onode.type
            failover = False

        if failover:
            self.cmd_tag_dead(old_node_name)

        new_node = \
self.get_node_info(new_node_name) if old_node_name == new_node.name: self.log.info("same node?") return if otype == 'root': self.takeover_root(old_node_name, new_node_name, failover) else: self.takeover_nonroot(old_node_name, new_node_name, failover) # switch subscribers around if self.options.all or failover: for n in self.find_subscribers_for(old_node_name): if n != new_node_name: self.node_change_provider(n, new_node_name) def find_provider(self, node_name): if self.node_alive(node_name): info = self.get_node_info(node_name) return info.provider_node nodelist = self.queue_info.member_map.keys() for n in nodelist: if n == node_name: continue if not self.node_alive(n): continue if node_name in self.get_node_subscriber_list(n): return n return self.find_root_node() def find_subscribers_for(self, parent_node_name): """Find subscribers for particular node.""" # use dict to eliminate duplicates res = {} nodelist = self.queue_info.member_map.keys() for node_name in nodelist: if node_name == parent_node_name: continue if not self.node_alive(node_name): continue n = self.get_node_info(node_name) if not n: continue if n.provider_node == parent_node_name: res[n.name] = 1 return res.keys() def cmd_tag_dead(self, dead_node_name): self.load_local_info() # tag node dead in memory self.log.info("Tagging node '%s' as dead", dead_node_name) self.queue_info.tag_dead(dead_node_name) # tag node dead in local node q = "select * from pgq_node.register_location(%s, %s, null, true)" self.node_cmd(self.local_node, q, [self.queue_name, dead_node_name]) # tag node dead in other nodes nodelist = self.queue_info.member_map.keys() for node_name in nodelist: if not self.node_alive(node_name): continue if node_name == dead_node_name: continue if node_name == self.local_node: continue try: q = "select * from pgq_node.register_location(%s, %s, null, true)" self.node_cmd(node_name, q, [self.queue_name, dead_node_name]) except DBError, d: msg = str(d).strip().split('\n', 1)[0] print('Node %s 
failure: %s' % (node_name, msg)) self.close_node_database(node_name) def cmd_pause(self): """Pause a node""" self.load_local_info() node, consumer = self.find_consumer() self.pause_consumer(node, consumer) def cmd_resume(self): """Resume a node from pause.""" self.load_local_info() node, consumer = self.find_consumer() self.resume_consumer(node, consumer) def cmd_members(self): """Show member list.""" self.load_local_info() db = self.get_database(self.initial_db_name) desc = 'Member info on %s@%s:' % (self.local_node, self.queue_name) q = "select node_name, dead, node_location"\ " from pgq_node.get_queue_locations(%s) order by 1" self.display_table(db, desc, q, [self.queue_name]) def cmd_node_info(self): self.load_local_info() q = self.queue_info n = q.local_node m = q.get_member(n.name) stlist = [] if m.dead: stlist.append('DEAD') if n.paused: stlist.append("PAUSED") if not n.uptodate: stlist.append("NON-UP-TO-DATE") st = ', '.join(stlist) if not st: st = 'OK' print('Node: %s Type: %s Queue: %s' % (n.name, n.type, q.queue_name)) print('Status: %s' % st) if n.type != 'root': print('Provider: %s' % n.provider_node) else: print('Provider: --') print('Connect strings:') print(' Local : %s' % self.cf.get('db')) print(' Public : %s' % m.location) if n.type != 'root': print(' Provider: %s' % n.provider_location) if n.combined_queue: print('Combined Queue: %s (node type: %s)' % (n.combined_queue, n.combined_type)) def cmd_wait_root(self): """Wait for next tick from root.""" self.load_local_info() if self.queue_info.local_node.type == 'root': self.log.info("Current node is root, no need to wait") return self.log.info("Finding root node") root_node = self.find_root_node() self.log.info("Root is %s", root_node) dst_db = self.get_database(self.initial_db_name) self.wait_for_node(dst_db, root_node) def cmd_wait_provider(self): """Wait for next tick from provider.""" self.load_local_info() if self.queue_info.local_node.type == 'root': self.log.info("Current node is root, no 
need to wait") return dst_db = self.get_database(self.initial_db_name) node = self.queue_info.local_node.provider_node self.log.info("Provider is %s", node) self.wait_for_node(dst_db, node) def wait_for_node(self, dst_db, node_name): """Core logic for waiting.""" self.log.info("Fetching last tick for %s", node_name) node_info = self.load_node_info(node_name) tick_id = node_info.last_tick self.log.info("Waiting for tick > %d", tick_id) q = "select * from pgq_node.get_node_info(%s)" dst_curs = dst_db.cursor() while 1: dst_curs.execute(q, [self.queue_name]) row = dst_curs.fetchone() dst_db.commit() if row['ret_code'] >= 300: self.log.warning("Problem: %s", row['ret_code'], row['ret_note']) return if row['worker_last_tick'] > tick_id: self.log.info("Got tick %d, exiting", row['worker_last_tick']) break self.sleep(2) def cmd_resurrect(self): """Convert out-of-sync old root to branch and sync queue contents. """ self.load_local_info() db = self.get_database(self.initial_db_name) curs = db.cursor() # stop if leaf if self.queue_info.local_node.type == 'leaf': self.log.info("Current node is leaf, nothing to do") return # stop if dump file exists if os.path.lexists(RESURRECT_DUMP_FILE): self.log.error("Dump file exists, cannot perform resurrection: %s", RESURRECT_DUMP_FILE) sys.exit(1) # # Find failover position # self.log.info("** Searching for gravestone **") # load subscribers sub_list = [] q = "select * from pgq_node.get_subscriber_info(%s)" curs.execute(q, [self.queue_name]) for row in curs.fetchall(): sub_list.append(row['node_name']) db.commit() # find backup subscription this_node = self.queue_info.local_node.name failover_cons = self.failover_consumer_name(this_node) full_list = self.queue_info.member_map.keys() done_nodes = { this_node: 1 } prov_node = None root_node = None for node_name in sub_list + full_list: if node_name in done_nodes: continue done_nodes[node_name] = 1 if not self.node_alive(node_name): self.log.info('Node %s is dead, skipping', node_name) 
continue self.log.info('Looking on node %s', node_name) node_db = None try: node_db = self.get_node_database(node_name) node_curs = node_db.cursor() node_curs.execute("select * from pgq.get_consumer_info(%s, %s)", [self.queue_name, failover_cons]) cons_rows = node_curs.fetchall() node_curs.execute("select * from pgq_node.get_node_info(%s)", [self.queue_name]) node_info = node_curs.fetchone() node_db.commit() if len(cons_rows) == 1: if prov_node: raise Exception('Unexpected situation: there are two gravestones - on nodes %s and %s' % (prov_node, node_name)) prov_node = node_name failover_tick = cons_rows[0]['last_tick'] self.log.info("Found gravestone on node: %s", node_name) if node_info['node_type'] == 'root': self.log.info("Found new root node: %s", node_name) root_node = node_name self.close_node_database(node_name) node_db = None if root_node and prov_node: break except skytools.DBError: self.log.warning("failed to check node %s", node_name) if node_db: self.close_node_database(node_name) node_db = None if not root_node: self.log.error("Cannot find new root node", failover_cons) sys.exit(1) if not prov_node: self.log.error("Cannot find failover position (%s)", failover_cons) sys.exit(1) # load worker state q = "select * from pgq_node.get_worker_state(%s)" rows = self.exec_cmd(db, q, [self.queue_name]) state = rows[0] # demote & pause self.log.info("** Demote & pause local node **") if self.queue_info.local_node.type == 'root': self.log.info('Node %s is root, demoting', this_node) q = "select * from pgq_node.demote_root(%s, %s, %s)" self.exec_cmd(db, q, [self.queue_name, 1, prov_node]) self.exec_cmd(db, q, [self.queue_name, 2, prov_node]) # change node type and set worker paused in same TX curs = db.cursor() self.exec_cmd(curs, q, [self.queue_name, 3, prov_node]) q = "select * from pgq_node.set_consumer_paused(%s, %s, true)" self.exec_cmd(curs, q, [self.queue_name, state['worker_name']]) db.commit() elif not state['paused']: # pause worker, don't wait for 
reaction, as it may be dead self.log.info('Node %s is branch, pausing worker: %s', this_node, state['worker_name']) q = "select * from pgq_node.set_consumer_paused(%s, %s, true)" self.exec_cmd(db, q, [self.queue_name, state['worker_name']]) else: self.log.info('Node %s is branch and worker is paused', this_node) # # Drop old consumers and subscribers # self.log.info("** Dropping old subscribers and consumers **") # unregister subscriber nodes q = "select pgq_node.unregister_subscriber(%s, %s)" for node_name in sub_list: self.log.info("Dropping old subscriber node: %s", node_name) curs.execute(q, [self.queue_name, node_name]) # unregister consumers q = "select consumer_name from pgq.get_consumer_info(%s)" curs.execute(q, [self.queue_name]) for row in curs.fetchall(): cname = row['consumer_name'] if cname[0] == '.': self.log.info("Keeping consumer: %s", cname) continue self.log.info("Dropping old consumer: %s", cname) q = "pgq.unregister_consumer(%s, %s)" curs.execute(q, [self.queue_name, cname]) db.commit() # dump events self.log.info("** Dump & delete lost events **") stats = self.resurrect_process_lost_events(db, failover_tick) self.log.info("** Subscribing %s to %s **", this_node, prov_node) # set local position self.log.info("Reset local completed pos") q = "select * from pgq_node.set_consumer_completed(%s, %s, %s)" self.exec_cmd(db, q, [self.queue_name, state['worker_name'], failover_tick]) # rename gravestone self.log.info("Rename gravestone to worker: %s", state['worker_name']) prov_db = self.get_node_database(prov_node) prov_curs = prov_db.cursor() q = "select * from pgq_node.unregister_subscriber(%s, %s)" self.exec_cmd(prov_curs, q, [self.queue_name, this_node], quiet = True) q = "select ret_code, ret_note, global_watermark"\ " from pgq_node.register_subscriber(%s, %s, %s, %s)" res = self.exec_cmd(prov_curs, q, [self.queue_name, this_node, state['worker_name'], failover_tick], quiet = True) global_wm = res[0]['global_watermark'] prov_db.commit() # import 
new global watermark self.log.info("Reset global watermark") q = "select * from pgq_node.set_global_watermark(%s, %s)" self.exec_cmd(db, q, [self.queue_name, global_wm], quiet = True) # show stats if stats: self.log.info("** Statistics **") klist = stats.keys() klist.sort() for k in klist: v = stats[k] self.log.info(" %s: %s", k, v) self.log.info("** Resurrection done, worker paused **") def resurrect_process_lost_events(self, db, failover_tick): curs = db.cursor() this_node = self.queue_info.local_node.name cons_name = this_node + '.dumper' self.log.info("Dumping lost events") # register temp consumer on queue q = "select pgq.register_consumer_at(%s, %s, %s)" curs.execute(q, [self.queue_name, cons_name, failover_tick]) db.commit() # process events as usual total_count = 0 final_tick_id = -1 stats = {} while 1: q = "select * from pgq.next_batch_info(%s, %s)" curs.execute(q, [self.queue_name, cons_name]) b = curs.fetchone() batch_id = b['batch_id'] if batch_id is None: break final_tick_id = b['cur_tick_id'] q = "select * from pgq.get_batch_events(%s)" curs.execute(q, [batch_id]) cnt = 0 for ev in curs.fetchall(): cnt += 1 total_count += 1 self.resurrect_dump_event(ev, stats, b) q = "select pgq.finish_batch(%s)" curs.execute(q, [batch_id]) if cnt > 0: db.commit() stats['dumped_count'] = total_count self.resurrect_dump_finish() self.log.info("%s events dumped", total_count) # unregiser consumer q = "select pgq.unregister_consumer(%s, %s)" curs.execute(q, [self.queue_name, cons_name]) db.commit() if failover_tick == final_tick_id: self.log.info("No batches found") return None # # Delete the events from queue # # This is done snapshots, to make sure we delete only events # that were dumped out previously. This uses the long-tx # resistant logic described in pgq.batch_event_sql(). 
# # find snapshots q = "select t1.tick_snapshot as s1, t2.tick_snapshot as s2"\ " from pgq.tick t1, pgq.tick t2"\ " where t1.tick_id = %s"\ " and t2.tick_id = %s" curs.execute(q, [failover_tick, final_tick_id]) ticks = curs.fetchone() s1 = skytools.Snapshot(ticks['s1']) s2 = skytools.Snapshot(ticks['s2']) xlist = [] for tx in s1.txid_list: if s2.contains(tx): xlist.append(str(tx)) # create where clauses W1 = None if len(xlist) > 0: W1 = "ev_txid in (%s)" % (",".join(xlist),) W2 = "ev_txid >= %d AND ev_txid <= %d"\ " and not txid_visible_in_snapshot(ev_txid, '%s')"\ " and txid_visible_in_snapshot(ev_txid, '%s')" % ( s1.xmax, s2.xmax, ticks['s1'], ticks['s2']) # loop over all queue data tables q = "select * from pgq.queue where queue_name = %s" curs.execute(q, [self.queue_name]) row = curs.fetchone() ntables = row['queue_ntables'] tbl_pfx = row['queue_data_pfx'] schema, table = tbl_pfx.split('.') total_del_count = 0 self.log.info("Deleting lost events") for i in range(ntables): del_count = 0 self.log.debug("Deleting events from table %d", i) qtbl = "%s.%s" % (skytools.quote_ident(schema), skytools.quote_ident(table + '_' + str(i))) q = "delete from " + qtbl + " where " if W1: self.log.debug(q + W1) curs.execute(q + W1) if curs.rowcount and curs.rowcount > 0: del_count += curs.rowcount self.log.debug(q + W2) curs.execute(q + W2) if curs.rowcount and curs.rowcount > 0: del_count += curs.rowcount total_del_count += del_count self.log.debug('%d events deleted', del_count) self.log.info('%d events deleted', total_del_count) stats['deleted_count'] = total_del_count # delete new ticks q = "delete from pgq.tick t using pgq.queue q"\ " where q.queue_name = %s"\ " and t.tick_queue = q.queue_id"\ " and t.tick_id > %s"\ " and t.tick_id <= %s" curs.execute(q, [self.queue_name, failover_tick, final_tick_id]) self.log.info("%s ticks deleted", curs.rowcount) db.commit() return stats _json_dump_file = None def resurrect_dump_event(self, ev, stats, batch_info): if self._json_dump_file 
is None: self._json_dump_file = open(RESURRECT_DUMP_FILE, 'w') sep = '[' else: sep = ',' # create ordinary dict to avoid problems with row class and datetime d = { 'ev_id': ev.ev_id, 'ev_type': ev.ev_type, 'ev_data': ev.ev_data, 'ev_extra1': ev.ev_extra1, 'ev_extra2': ev.ev_extra2, 'ev_extra3': ev.ev_extra3, 'ev_extra4': ev.ev_extra4, 'ev_time': ev.ev_time.isoformat(), 'ev_txid': ev.ev_txid, 'ev_retry': ev.ev_retry, 'tick_id': batch_info['cur_tick_id'], 'prev_tick_id': batch_info['prev_tick_id'], } jsev = skytools.json_encode(d) s = sep + '\n' + jsev self._json_dump_file.write(s) def resurrect_dump_finish(self): if self._json_dump_file: self._json_dump_file.write('\n]\n') self._json_dump_file.close() self._json_dump_file = None def failover_consumer_name(self, node_name): return node_name + ".gravestone" # # Shortcuts for operating on nodes. # def load_local_info(self): """fetch set info from local node.""" db = self.get_database(self.initial_db_name) self.queue_info = self.load_queue_info(db) self.local_node = self.queue_info.local_node.name def get_node_database(self, node_name): """Connect to node.""" if node_name == self.queue_info.local_node.name: db = self.get_database(self.initial_db_name) else: m = self.queue_info.get_member(node_name) if not m: self.log.error("get_node_database: cannot resolve %s", node_name) sys.exit(1) #self.log.info("%s: dead=%s", m.name, m.dead) if m.dead: return None loc = m.location db = self.get_database('node.' + node_name, connstr = loc, profile = 'remote') return db def node_alive(self, node_name): m = self.queue_info.get_member(node_name) if not m: res = False elif m.dead: res = False else: res = True #self.log.warning('node_alive(%s) = %s', node_name, res) return res def close_node_database(self, node_name): """Disconnect node's connection.""" if node_name == self.queue_info.local_node.name: self.close_database(self.initial_db_name) else: self.close_database("node." 
+ node_name) def node_cmd(self, node_name, sql, args, quiet = False): """Execute SQL command on particular node.""" db = self.get_node_database(node_name) if not db: self.log.warning("ignoring cmd for dead node '%s': %s", node_name, skytools.quote_statement(sql, args)) return None return self.exec_cmd(db, sql, args, quiet = quiet, prefix=node_name) # # Various operation on nodes. # def set_paused(self, node, consumer, pause_flag): """Set node pause flag and wait for confirmation.""" q = "select * from pgq_node.set_consumer_paused(%s, %s, %s)" self.node_cmd(node, q, [self.queue_name, consumer, pause_flag]) self.log.info('Waiting for worker to accept') while 1: q = "select * from pgq_node.get_consumer_state(%s, %s)" stat = self.node_cmd(node, q, [self.queue_name, consumer], quiet = 1)[0] if stat['paused'] != pause_flag: raise Exception('operation canceled? %s <> %s' % (repr(stat['paused']), repr(pause_flag))) if stat['uptodate']: op = pause_flag and "paused" or "resumed" self.log.info("Consumer '%s' on node '%s' %s", consumer, node, op) return time.sleep(1) raise Exception('process canceled') def pause_consumer(self, node, consumer): """Shortcut for pausing by name.""" self.set_paused(node, consumer, True) def resume_consumer(self, node, consumer): """Shortcut for resuming by name.""" self.set_paused(node, consumer, False) def pause_node(self, node): """Shortcut for pausing by name.""" state = self.get_node_info(node) self.pause_consumer(node, state.worker_name) def resume_node(self, node): """Shortcut for resuming by name.""" state = self.get_node_info(node) if state: self.resume_consumer(node, state.worker_name) def subscribe_node(self, target_node, subscriber_node, tick_pos): """Subscribing one node to another.""" q = "select * from pgq_node.subscribe_node(%s, %s, %s)" self.node_cmd(target_node, q, [self.queue_name, subscriber_node, tick_pos]) def unsubscribe_node(self, target_node, subscriber_node): """Unsubscribing one node from another.""" q = "select * from 
def get_node_info(self, node_name):
    """Node info lookup with memoization.

    First lookup for a name goes through load_node_info(); the result
    (including None for dead nodes) is remembered for later calls.
    """
    try:
        return self._node_cache[node_name]
    except KeyError:
        info = self.load_node_info(node_name)
        self._node_cache[node_name] = info
        return info
def ival2str(iv):
    """Render a datetime.timedelta as compact '1d2h3m4s' style text.

    Zero-valued leading units are omitted; seconds are always shown.
    Relies on timedelta's normalized .days / .seconds attributes.
    """
    minutes_total, seconds = divmod(iv.seconds, 60)
    hours, minutes = divmod(minutes_total, 60)
    parts = []
    if iv.days:
        parts.append("%dd" % iv.days)
    if hours:
        parts.append("%dh" % hours)
    if minutes:
        parts.append("%dm" % minutes)
    parts.append("%ds" % seconds)
    return "".join(parts)
def get_title(self):
    """One-line display label: node name followed by its type."""
    return "{0} ({1})".format(self.name, self.type)
def tag_dead(self, node_name):
    """Mark a node as dead for this session.

    Known nodes get their member record flagged; unknown names get a
    synthetic dead MemberInfo so later lookups also see them as dead.
    """
    if node_name not in self.node_map:
        stub = {'node_name': node_name, 'node_location': None, 'dead': True}
        self.member_map[node_name] = MemberInfo(stub)
    else:
        self.member_map[node_name].dead = True
Expects that data for all nodes is filled in.""" print('Queue: %s Local node: %s' % (self.queue_name, self.local_node.name)) print('') root_list = self._prepare_tree() for root in root_list: self._tree_calc(root) datalines = self._print_node(root, '', []) for ln in datalines: print(self._DATAFMT % (' ', ln)) def _print_node(self, node, pfx, datalines): # print a tree fragment for node and info # returns list of unprinted data rows for ln in datalines: print(self._DATAFMT % (_setpfx(pfx, '|'), ln)) datalines = node.get_infolines() print("%s%s" % (_setpfx(pfx, '+--: '), node.get_title())) for i, n in enumerate(node.child_list): sfx = ((i < len(node.child_list) - 1) and ' |' or ' ') datalines = self._print_node(n, pfx + sfx, datalines) return datalines def _prepare_tree(self): # reset vars, fill parent and child_list for each node # returns list of root nodes (mostly 1) for node in self.node_map.values(): node.total_childs = 0 node.levels = 0 node.child_list = [] node.parent = None root_list = [] for node in self.node_map.values(): if node.provider_node \ and node.provider_node != node.name \ and node.provider_node in self.node_map: p = self.node_map[node.provider_node] p.child_list.append(node) node.parent = p else: node.parent = None root_list.append(node) return root_list def _tree_calc(self, node): # calculate levels and count total childs # sort the tree based on them total = len(node.child_list) levels = 1 for subnode in node.child_list: self._tree_calc(subnode) total += subnode.total_childs if levels < subnode.levels + 1: levels = subnode.levels + 1 node.total_childs = total node.levels = levels node.child_list.sort(key = _node_key) def _setpfx(pfx, sfx): if pfx: pfx = pfx[:-1] + sfx return pfx def _node_key(n): return (n.levels, n.total_childs, n.name) skytools-3.2.6/python/pgq/cascade/worker.py0000644000000000000000000003736512426435645015706 0ustar """Cascaded worker. CascadedConsumer that also maintains node. 
""" import sys, time, skytools from pgq.cascade.consumer import CascadedConsumer from pgq.producer import bulk_insert_events from pgq.event import Event __all__ = ['CascadedWorker'] class WorkerState: """Depending on node state decides on actions worker needs to do.""" # node_type, # node_name, provider_node, # global_watermark, local_watermark # combined_queue, combined_type process_batch = 0 # handled in CascadedConsumer copy_events = 0 # ok global_wm_event = 0 # ok local_wm_publish = 1 # ok process_events = 0 # ok send_tick_event = 0 # ok wait_behind = 0 # ok process_tick_event = 0 # ok target_queue = '' # ok keep_event_ids = 0 # ok create_tick = 0 # ok filtered_copy = 0 # ok process_global_wm = 0 # ok sync_watermark = 0 # ? wm_sync_nodes = [] def __init__(self, queue_name, nst): self.node_type = nst['node_type'] self.node_name = nst['node_name'] self.local_watermark = nst['local_watermark'] self.global_watermark = nst['global_watermark'] self.node_attrs = {} attrs = nst.get('node_attrs', '') if attrs: self.node_attrs = skytools.db_urldecode(attrs) ntype = nst['node_type'] ctype = nst['combined_type'] if ntype == 'root': self.global_wm_event = 1 self.local_wm_publish = 0 elif ntype == 'branch': self.target_queue = queue_name self.process_batch = 1 self.process_events = 1 self.copy_events = 1 self.process_tick_event = 1 self.keep_event_ids = 1 self.create_tick = 1 if 'sync_watermark' in self.node_attrs: slist = self.node_attrs['sync_watermark'] self.sync_watermark = 1 self.wm_sync_nodes = slist.split(',') else: self.process_global_wm = 1 elif ntype == 'leaf' and not ctype: self.process_batch = 1 self.process_events = 1 elif ntype == 'leaf' and ctype: self.target_queue = nst['combined_queue'] if ctype == 'root': self.process_batch = 1 self.process_events = 1 self.copy_events = 1 self.filtered_copy = 1 self.send_tick_event = 1 elif ctype == 'branch': self.process_batch = 1 self.wait_behind = 1 else: raise Exception('invalid state 1') else: raise Exception('invalid 
state 2') if ctype and ntype != 'leaf': raise Exception('invalid state 3') class CascadedWorker(CascadedConsumer): """CascadedWorker base class. Config fragment:: ## Parameters for pgq.CascadedWorker ## # how often the root node should push wm downstream (seconds) #global_wm_publish_period = 300 # how often the nodes should report their wm upstream (seconds) #local_wm_publish_period = 300 """ global_wm_publish_time = 0 global_wm_publish_period = 5 * 60 local_wm_publish_time = 0 local_wm_publish_period = 5 * 60 max_evbuf = 500 cur_event_seq = 0 cur_max_id = 0 seq_buffer = 10000 main_worker = True _worker_state = None ev_buf = [] real_global_wm = None def __init__(self, service_name, db_name, args): """Initialize new consumer. @param service_name: service_name for DBScript @param db_name: target database name for get_database() @param args: cmdline args for DBScript """ CascadedConsumer.__init__(self, service_name, db_name, args) def reload(self): CascadedConsumer.reload(self) self.global_wm_publish_period = self.cf.getfloat('global_wm_publish_period', CascadedWorker.global_wm_publish_period) self.local_wm_publish_period = self.cf.getfloat('local_wm_publish_period', CascadedWorker.local_wm_publish_period) def process_remote_batch(self, src_db, tick_id, event_list, dst_db): """Worker-specific event processing.""" self.ev_buf = [] max_id = 0 st = self._worker_state if st.wait_behind: self.wait_for_tick(dst_db, tick_id) src_curs = src_db.cursor() dst_curs = dst_db.cursor() for ev in event_list: if st.copy_events: self.copy_event(dst_curs, ev, st.filtered_copy) if ev.ev_type.split('.', 1)[0] in ("pgq", "londiste"): # process cascade events even on waiting leaf node self.process_remote_event(src_curs, dst_curs, ev) else: if st.process_events: self.process_remote_event(src_curs, dst_curs, ev) if ev.ev_id > max_id: max_id = ev.ev_id if max_id > self.cur_max_id: self.cur_max_id = max_id def wait_for_tick(self, dst_db, tick_id): """On combined-branch leaf needs to wait from 
def is_batch_done(self, state, batch_info, dst_db):
    """Decide whether the current batch is already applied on target.

    Extends CascadedConsumer's event-level check: on tick-creating
    nodes the batch counts as done only when the matching tick also
    exists on the target queue; a missing tick is created here (the
    tick lives in a separate transaction from the events).

    Returns True when the batch can be skipped, False when it still
    needs processing.
    """
    wst = self._worker_state
    # on combined-branch the target can get several batches ahead
    if wst.wait_behind:
        # let the wait-behind logic track ticks
        return False
    # check if events have processed
    done = CascadedConsumer.is_batch_done(self, state, batch_info, dst_db)
    if not wst.create_tick:
        return done
    if not done:
        return False
    # check if tick is done - it happens in separate tx
    # fetch last tick from target queue
    q = "select t.tick_id from pgq.tick t, pgq.queue q"\
        " where t.tick_queue = q.queue_id and q.queue_name = %s"\
        " order by t.tick_queue desc, t.tick_id desc"\
        " limit 1"
    curs = dst_db.cursor()
    curs.execute(q, [self.queue_name])
    last_tick = curs.fetchone()['tick_id']
    dst_db.commit()
    # insert tick if missing
    cur_tick = batch_info['tick_id']
    if last_tick != cur_tick:
        prev_tick = batch_info['prev_tick_id']
        tick_time = batch_info['batch_end']
        if last_tick != prev_tick:
            # target queue is at an unexpected position - refuse to guess
            raise Exception('is_batch_done: last branch tick = %d, expected %d or %d' % (
                            last_tick, prev_tick, cur_tick))
        self.create_branch_tick(dst_db, cur_tick, tick_time)
    return True
""" t = time.time() if t - self.local_wm_publish_time < self.local_wm_publish_period: return st = self._worker_state wm = st.local_watermark if st.sync_watermark: # dont send local watermark upstream wm = self.batch_info['prev_tick_id'] elif wm > self.batch_info['cur_tick_id']: # in wait-behind-leaf case, the wm from target can be # ahead from source queue, use current batch then wm = self.batch_info['cur_tick_id'] self.log.debug("Publishing local watermark: %d", wm) src_curs = src_db.cursor() q = "select * from pgq_node.set_subscriber_watermark(%s, %s, %s)" src_curs.execute(q, [self.pgq_queue_name, st.node_name, wm]) src_db.commit() # if next part fails, dont repeat it immediately self.local_wm_publish_time = t if st.sync_watermark and self.real_global_wm is not None: # instead sync 'global-watermark' with specific nodes dst_curs = dst_db.cursor() nmap = self._get_node_map(dst_curs) dst_db.commit() # local lowest wm = st.local_watermark # the global-watermark in subtree can stay behind # upstream global-watermark, but must not go ahead if self.real_global_wm < wm: wm = self.real_global_wm for node in st.wm_sync_nodes: if node == st.node_name: continue if node not in nmap: # dont ignore missing nodes - cluster may be partially set up self.log.warning('Unknown node in sync_watermark list: %s', node) return n = nmap[node] if n['dead']: # ignore dead nodes continue wmdb = self.get_database('wmdb', connstr = n['node_location'], autocommit = 1, profile = 'remote') wmcurs = wmdb.cursor() q = 'select local_watermark from pgq_node.get_node_info(%s)' wmcurs.execute(q, [self.queue_name]) row = wmcurs.fetchone() if not row: # partially set up node? 
self.log.warning('Node not working: %s', node) elif row['local_watermark'] < wm: # keep lowest wm wm = row['local_watermark'] self.close_database('wmdb') # now we have lowest wm, store it q = "select pgq_node.set_global_watermark(%s, %s)" dst_curs.execute(q, [self.queue_name, wm]) dst_db.commit() def _get_node_map(self, curs): q = "select node_name, node_location, dead from pgq_node.get_queue_locations(%s)" curs.execute(q, [self.queue_name]) res = {} for row in curs.fetchall(): res[row['node_name']] = row return res def process_remote_event(self, src_curs, dst_curs, ev): """Handle cascading events. """ if ev.retry: raise Exception('CascadedWorker must not get retry events') # non cascade events send to CascadedConsumer to error out if ev.ev_type[:4] != 'pgq.': CascadedConsumer.process_remote_event(self, src_curs, dst_curs, ev) return # ignore cascade events if not main worker if not self.main_worker: return # check if for right queue t = ev.ev_type if ev.ev_extra1 != self.pgq_queue_name and t != "pgq.tick-id": raise Exception("bad event in queue: "+str(ev)) self.log.debug("got cascade event: %s(%s)", t, ev.ev_data) st = self._worker_state if t == "pgq.location-info": node = ev.ev_data loc = ev.ev_extra2 dead = ev.ev_extra3 q = "select * from pgq_node.register_location(%s, %s, %s, %s)" dst_curs.execute(q, [self.pgq_queue_name, node, loc, dead]) elif t == "pgq.unregister-location": node = ev.ev_data q = "select * from pgq_node.unregister_location(%s, %s)" dst_curs.execute(q, [self.pgq_queue_name, node]) elif t == "pgq.global-watermark": if st.sync_watermark: tick_id = int(ev.ev_data) self.log.debug('Half-ignoring global watermark %d', tick_id) self.real_global_wm = tick_id elif st.process_global_wm: tick_id = int(ev.ev_data) q = "select * from pgq_node.set_global_watermark(%s, %s)" dst_curs.execute(q, [self.pgq_queue_name, tick_id]) elif t == "pgq.tick-id": tick_id = int(ev.ev_data) if ev.ev_extra1 == self.pgq_queue_name: raise Exception('tick-id event for own 
def create_branch_tick(self, dst_db, tick_id, tick_time):
    """Insert a tick into the local queue via pgq.ticker().

    pgq.ticker() must run in autocommit mode; the old isolation level
    is restored in a finally block so a failing execute() no longer
    leaves the connection stuck in autocommit.
    """
    q = "select pgq.ticker(%s, %s, %s, %s)"
    ilev = dst_db.isolation_level
    dst_db.set_isolation_level(0)
    try:
        dst_curs = dst_db.cursor()
        dst_curs.execute(q, [self.pgq_queue_name, tick_id, tick_time, self.cur_max_id])
    finally:
        dst_db.set_isolation_level(ilev)
""" if not self.main_worker: return if filtered_copy: if ev.type[:4] == "pgq.": return if len(self.ev_buf) >= self.max_evbuf: self.flush_events(dst_curs) if ev.type == 'pgq.global-watermark': st = self._worker_state if st.sync_watermark: # replace payload with synced global watermark row = ev._event_row.copy() row['ev_data'] = str(st.global_watermark) ev = Event(self.queue_name, row) self.ev_buf.append(ev) def flush_events(self, dst_curs): """Send copy buffer to target queue. """ if len(self.ev_buf) == 0: return flds = ['ev_time', 'ev_type', 'ev_data', 'ev_extra1', 'ev_extra2', 'ev_extra3', 'ev_extra4'] st = self._worker_state if st.keep_event_ids: flds.append('ev_id') bulk_insert_events(dst_curs, self.ev_buf, flds, st.target_queue) self.ev_buf = [] def refresh_state(self, dst_db, full_logic = True): """Load also node state from target node. """ res = CascadedConsumer.refresh_state(self, dst_db, full_logic) q = "select * from pgq_node.get_node_info(%s)" st = self.exec_cmd(dst_db, q, [ self.pgq_queue_name ]) self._worker_state = WorkerState(self.pgq_queue_name, st[0]) return res def process_root_node(self, dst_db): """On root node send global watermark downstream. """ CascadedConsumer.process_root_node(self, dst_db) t = time.time() if t - self.global_wm_publish_time < self.global_wm_publish_period: return self.log.debug("Publishing global watermark") dst_curs = dst_db.cursor() q = "select * from pgq_node.set_global_watermark(%s, NULL)" dst_curs.execute(q, [self.pgq_queue_name]) dst_db.commit() self.global_wm_publish_time = t skytools-3.2.6/python/pgq/cascade/consumer.py0000644000000000000000000002340312426435645016214 0ustar """Cascaded consumer. Does not maintain node, but is able to pause, resume and switch provider. """ import sys, time from pgq.baseconsumer import BaseConsumer PDB = '_provider_db' __all__ = ['CascadedConsumer'] class CascadedConsumer(BaseConsumer): """CascadedConsumer base class. Loads provider from target node, accepts pause/resume commands. 
""" _consumer_state = None def __init__(self, service_name, db_name, args): """Initialize new consumer. @param service_name: service_name for DBScript @param db_name: target database name for get_database() @param args: cmdline args for DBScript """ BaseConsumer.__init__(self, service_name, PDB, args) self.log.debug("__init__") self.target_db = db_name self.provider_connstr = None def init_optparse(self, parser = None): p = BaseConsumer.init_optparse(self, parser) p.add_option("--provider", help = "provider location for --register") p.add_option("--rewind", action = "store_true", help = "change queue position according to destination") p.add_option("--reset", action = "store_true", help = "reset queue position on destination side") return p def startup(self): if self.options.rewind: self.rewind() sys.exit(0) if self.options.reset: self.dst_reset() sys.exit(0) return BaseConsumer.startup(self) def register_consumer(self, provider_loc = None): """Register consumer on source node first, then target node.""" if not provider_loc: provider_loc = self.options.provider if not provider_loc: self.log.error('Please give provider location with --provider=') sys.exit(1) dst_db = self.get_database(self.target_db) dst_curs = dst_db.cursor() src_db = self.get_database(PDB, connstr = provider_loc, profile = 'remote') src_curs = src_db.cursor() # check target info q = "select * from pgq_node.get_node_info(%s)" res = self.exec_cmd(src_db, q, [ self.queue_name ]) pnode = res[0]['node_name'] if not pnode: raise Exception('parent node not initialized?') # source queue BaseConsumer.register_consumer(self) # fetch pos q = "select last_tick from pgq.get_consumer_info(%s, %s)" src_curs.execute(q, [self.queue_name, self.consumer_name]) last_tick = src_curs.fetchone()['last_tick'] if not last_tick: raise Exception('registration failed?') src_db.commit() # target node q = "select * from pgq_node.register_consumer(%s, %s, %s, %s)" self.exec_cmd(dst_db, q, [self.queue_name, self.consumer_name, 
def get_consumer_state(self):
    """Fetch this consumer's cascaded state row from the target node.

    Returns the dict produced by pgq_node.get_consumer_state()
    (completed_tick, provider_location, paused, uptodate, ...).
    exec_cmd() handles execution and result checking itself, so no
    separate cursor is needed (the old unused cursor was dropped).
    """
    dst_db = self.get_database(self.target_db)
    q = "select * from pgq_node.get_consumer_state(%s, %s)"
    rows = self.exec_cmd(dst_db, q, [ self.queue_name, self.consumer_name ])
    return rows[0]
def process_batch(self, src_db, batch_id, event_list):
    """Apply one batch on the target database, skipping already-done ones."""
    dst_db = self.get_database(self.target_db)
    cstate = self._consumer_state
    if self.is_batch_done(cstate, self.batch_info, dst_db):
        # target already contains this batch
        return
    tick_id = self.batch_info['tick_id']
    self.process_remote_batch(src_db, tick_id, event_list, dst_db)
    # finish_remote_batch() also commits the target transaction
    self.finish_remote_batch(src_db, dst_db, tick_id)
""" while 1: q = "select * from pgq_node.get_consumer_state(%s, %s)" rows = self.exec_cmd(dst_db, q, [ self.queue_name, self.consumer_name ]) state = rows[0] # tag refreshed if not state['uptodate'] and full_logic: q = "select * from pgq_node.set_consumer_uptodate(%s, %s, true)" self.exec_cmd(dst_db, q, [ self.queue_name, self.consumer_name ]) if state['cur_error'] and self.work_state != -1: q = "select * from pgq_node.set_consumer_error(%s, %s, NULL)" self.exec_cmd(dst_db, q, [ self.queue_name, self.consumer_name ]) if not state['paused'] or not full_logic: break time.sleep(self.loop_delay) # update connection loc = state['provider_location'] if self.provider_connstr != loc: self.close_database(PDB) self.provider_connstr = loc # re-initialize provider connection db = self.get_provider_db(state); return state def is_batch_done(self, state, batch_info, dst_db): cur_tick = batch_info['tick_id'] prev_tick = batch_info['prev_tick_id'] dst_tick = state['completed_tick'] if not dst_tick: raise Exception('dst_tick NULL?') if prev_tick == dst_tick: # on track return False if cur_tick == dst_tick: # current batch is already applied, skip it return True # anything else means problems raise Exception('Lost position: batch %s..%s, dst has %s' % ( prev_tick, cur_tick, dst_tick)) def process_remote_batch(self, src_db, tick_id, event_list, dst_db): """Per-batch callback. By default just calls process_remote_event() in loop.""" src_curs = src_db.cursor() dst_curs = dst_db.cursor() for ev in event_list: self.process_remote_event(src_curs, dst_curs, ev) def process_remote_event(self, src_curs, dst_curs, ev): """Per-event callback. By default ignores cascading events and gives error on others. Can be called from user handler to finish unprocessed events. """ if ev.ev_type[:4] == "pgq.": # ignore cascading events pass else: raise Exception('Unhandled event type in queue: %s' % ev.ev_type) def finish_remote_batch(self, src_db, dst_db, tick_id): """Called after event processing. 
def is_last_batch(self, dst_curs, batch_id):
    """Ask the external database whether *batch_id* was already applied.

    Uses the pgq_ext batch-tracking schema on the target side.
    """
    sql = "select pgq_ext.is_batch_done(%s, %s)"
    dst_curs.execute(sql, [self.consumer_name, batch_id])
    row = dst_curs.fetchone()
    return row[0]
""" q = "select pgq_ext.set_batch_done(%s, %s)" dst_curs.execute(q, [ self.consumer_name, batch_id ]) def process_remote_batch(self, db, batch_id, event_list, dst_db): raise Exception('process_remote_batch not implemented') class SerialConsumer(Consumer): """Consumer that applies batches sequentially in second database. Requirements: - Whole batch in one TX. - Must not use retry queue. Features: - Can detect if several batches are already applied to dest db. - If some ticks are lost. allows to seek back on queue. Whether it succeeds, depends on pgq configuration. """ def __init__(self, service_name, db_name, remote_db, args): Consumer.__init__(self, service_name, db_name, args) self.remote_db = remote_db self.dst_schema = "pgq_ext" def startup(self): if self.options.rewind: self.rewind() sys.exit(0) if self.options.reset: self.dst_reset() sys.exit(0) return Consumer.startup(self) def init_optparse(self, parser = None): p = Consumer.init_optparse(self, parser) p.add_option("--rewind", action = "store_true", help = "change queue position according to destination") p.add_option("--reset", action = "store_true", help = "reset queue pos on destination side") return p def process_batch(self, db, batch_id, event_list): """Process all events in batch. """ dst_db = self.get_database(self.remote_db) curs = dst_db.cursor() # check if done if self.is_batch_done(curs): return # actual work self.process_remote_batch(db, batch_id, event_list, dst_db) # finish work self.set_batch_done(curs) dst_db.commit() def is_batch_done(self, dst_curs): """Helper function to keep track of last successful batch in external database. 
""" cur_tick = self.batch_info['tick_id'] prev_tick = self.batch_info['prev_tick_id'] dst_tick = self.get_last_tick(dst_curs) if not dst_tick: # seems this consumer has not run yet against dst_db return False if prev_tick == dst_tick: # on track return False if cur_tick == dst_tick: # current batch is already applied, skip it return True # anything else means problems raise Exception('Lost position: batch %d..%d, dst has %d' % ( prev_tick, cur_tick, dst_tick)) def set_batch_done(self, dst_curs): """Helper function to set last successful batch in external database. """ tick_id = self.batch_info['tick_id'] self.set_last_tick(dst_curs, tick_id) def register_consumer(self): new = Consumer.register_consumer(self) if new: # fixme self.dst_reset() def unregister_consumer(self): """If unregistering, also clean completed tick table on dest.""" Consumer.unregister_consumer(self) self.dst_reset() def process_remote_batch(self, db, batch_id, event_list, dst_db): raise Exception('process_remote_batch not implemented') def rewind(self): self.log.info("Rewinding queue") src_db = self.get_database(self.db_name) dst_db = self.get_database(self.remote_db) src_curs = src_db.cursor() dst_curs = dst_db.cursor() dst_tick = self.get_last_tick(dst_curs) if dst_tick: q = "select pgq.register_consumer_at(%s, %s, %s)" src_curs.execute(q, [self.queue_name, self.consumer_name, dst_tick]) else: self.log.warning('No tick found on dst side') dst_db.commit() src_db.commit() def dst_reset(self): self.log.info("Resetting queue tracking on dst side") dst_db = self.get_database(self.remote_db) dst_curs = dst_db.cursor() self.set_last_tick(dst_curs, None) dst_db.commit() def get_last_tick(self, dst_curs): q = "select %s.get_last_tick(%%s)" % self.dst_schema dst_curs.execute(q, [self.consumer_name]) res = dst_curs.fetchone() return res[0] def set_last_tick(self, dst_curs, tick_id): q = "select %s.set_last_tick(%%s, %%s)" % self.dst_schema dst_curs.execute(q, [ self.consumer_name, tick_id ]) 
skytools-3.2.6/python/pgq/consumer.py0000644000000000000000000000737312426435645014641 0ustar """PgQ consumer framework for Python. """ from pgq.baseconsumer import BaseConsumer, BaseBatchWalker from pgq.event import Event __all__ = ['Consumer'] # Event status codes EV_UNTAGGED = -1 EV_RETRY = 0 EV_DONE = 1 class RetriableEvent(Event): """Event which can be retried Consumer is supposed to tag them after processing. """ __slots__ = ('_status', ) def __init__(self, queue_name, row): super(RetriableEvent, self).__init__(queue_name, row) self._status = EV_DONE def tag_done(self): self._status = EV_DONE def get_status(self): return self._status def tag_retry(self, retry_time = 60): self._status = EV_RETRY self.retry_time = retry_time class RetriableWalkerEvent(RetriableEvent): """Redirects status flags to RetriableBatchWalker. That way event data can be gc'd immediately and tag_done() events don't need to be remembered. """ __slots__ = ('_walker', ) def __init__(self, walker, queue_name, row): super(RetriableWalkerEvent, self).__init__(queue_name, row) self._walker = walker def tag_done(self): self._walker.tag_event_done(self) def get_status(self): self._walker.get_status(self) def tag_retry(self, retry_time = 60): self._walker.tag_event_retry(self, retry_time) class RetriableBatchWalker(BaseBatchWalker): """BatchWalker that returns RetriableEvents """ def __init__(self, curs, batch_id, queue_name, fetch_size = 300, consumer_filter = None): super(RetriableBatchWalker, self).__init__(curs, batch_id, queue_name, fetch_size, consumer_filter) self.status_map = {} def _make_event(self, queue_name, row): return RetriableWalkerEvent(self, queue_name, row) def tag_event_done(self, event): if event.id in self.status_map: del self.status_map[event.id] def tag_event_retry(self, event, retry_time): self.status_map[event.id] = (EV_RETRY, retry_time) def get_status(self, event): return self.status_map.get(event.id, (EV_DONE, 0))[0] def iter_status(self): for res in 
self.status_map.iteritems(): yield res class Consumer(BaseConsumer): """Normal consumer base class. Can retry events """ _batch_walker_class = RetriableBatchWalker def _make_event(self, queue_name, row): return RetriableEvent(queue_name, row) def _flush_retry(self, curs, batch_id, list): """Tag retry events.""" retry = 0 if self.pgq_lazy_fetch: for ev_id, stat in list.iter_status(): if stat[0] == EV_RETRY: self._tag_retry(curs, batch_id, ev_id, stat[1]) retry += 1 elif stat[0] != EV_DONE: raise Exception("Untagged event: id=%d" % ev_id) else: for ev in list: if ev._status == EV_RETRY: self._tag_retry(curs, batch_id, ev.id, ev.retry_time) retry += 1 elif ev._status != EV_DONE: raise Exception("Untagged event: (id=%d, type=%s, data=%s, ex1=%s" % ( ev.id, ev.type, ev.data, ev.extra1)) # report weird events if retry: self.stat_increase('retry-events', retry) def _finish_batch(self, curs, batch_id, list): """Tag events and notify that the batch is done.""" self._flush_retry(curs, batch_id, list) super(Consumer, self)._finish_batch(curs, batch_id, list) def _tag_retry(self, cx, batch_id, ev_id, retry_time): """Tag event for retry. 
(internal)""" cx.execute("select pgq.event_retry(%s, %s, %s)", [batch_id, ev_id, retry_time]) skytools-3.2.6/python/pgq/__init__.py0000644000000000000000000000172412426435645014537 0ustar """PgQ framework for Python.""" __pychecker__ = 'no-miximport' import pgq.event import pgq.consumer import pgq.remoteconsumer import pgq.producer import pgq.status import pgq.cascade import pgq.cascade.nodeinfo import pgq.cascade.admin import pgq.cascade.consumer import pgq.cascade.worker from pgq.event import * from pgq.consumer import * from pgq.coopconsumer import * from pgq.remoteconsumer import * from pgq.localconsumer import * from pgq.producer import * from pgq.status import * from pgq.cascade.nodeinfo import * from pgq.cascade.admin import * from pgq.cascade.consumer import * from pgq.cascade.worker import * __all__ = ( pgq.event.__all__ + pgq.consumer.__all__ + pgq.coopconsumer.__all__ + pgq.remoteconsumer.__all__ + pgq.localconsumer.__all__ + pgq.cascade.nodeinfo.__all__ + pgq.cascade.admin.__all__ + pgq.cascade.consumer.__all__ + pgq.cascade.worker.__all__ + pgq.producer.__all__ + pgq.status.__all__ ) skytools-3.2.6/python/londiste/0000755000000000000000000000000012426435645013454 5ustar skytools-3.2.6/python/londiste/handler.py0000644000000000000000000003121612426435645015446 0ustar """Table handler. Per-table decision how to create trigger, copy data and apply events. """ """ -- redirect & create table partition by batch_time partition by date field -- sql handling: cube1 - I/U/D -> partition, insert cube2 - I/U/D -> partition, del/insert field remap name remap bublin filter - replay: filter events - copy: additional where - add: add trigger args multimaster - replay: conflict handling, add fncall to sql queue? 
- add: add 'backup' arg to trigger plain londiste: - replay: add to sql queue """ import sys import logging import skytools import londiste.handlers __all__ = ['RowCache', 'BaseHandler', 'build_handler', 'EncodingValidator', 'load_handler_modules', 'create_handler_string'] class RowCache: def __init__(self, table_name): self.table_name = table_name self.keys = {} self.rows = [] def add_row(self, d): row = [None] * len(self.keys) for k, v in d.items(): try: row[self.keys[k]] = v except KeyError: i = len(row) self.keys[k] = i row.append(v) row = tuple(row) self.rows.append(row) def get_fields(self): row = [None] * len(self.keys) for k, i in self.keys.keys(): row[i] = k return tuple(row) def apply_rows(self, curs): fields = self.get_fields() skytools.magic_insert(curs, self.table_name, self.rows, fields) class BaseHandler: """Defines base API, does nothing. """ handler_name = 'nop' log = logging.getLogger('basehandler') def __init__(self, table_name, args, dest_table): self.table_name = table_name self.dest_table = dest_table or table_name self.fq_table_name = skytools.quote_fqident(self.table_name) self.fq_dest_table = skytools.quote_fqident(self.dest_table) self.args = args self._check_args (args) self.conf = self.get_config() def _parse_args_from_doc (self): doc = self.__doc__ or "" params_descr = [] params_found = False for line in doc.splitlines(): ln = line.strip() if params_found: if ln == "": break descr = ln.split (None, 1) name, sep, rest = descr[0].partition('=') if sep: expr = descr[0].rstrip(":") text = descr[1].lstrip(":- \t") else: name, expr, text = params_descr.pop() text += "\n" + ln params_descr.append ((name, expr, text)) elif ln == "Parameters:": params_found = True return params_descr def _check_args (self, args): self.valid_arg_names = [] passed_arg_names = args.keys() if args else [] args_from_doc = self._parse_args_from_doc() if args_from_doc: self.valid_arg_names = list(zip(*args_from_doc)[0]) invalid = set(passed_arg_names) - 
set(self.valid_arg_names) if invalid: raise ValueError ("Invalid handler argument: %s" % list(invalid)) def get_arg (self, name, value_list, default = None): """ Return arg value or default; also check if value allowed. """ default = default or value_list[0] val = type(default)(self.args.get(name, default)) if val not in value_list: raise Exception('Bad argument %s value %r' % (name, val)) return val def get_config (self): """ Process args dict (into handler config). """ conf = skytools.dbdict() return conf def add(self, trigger_arg_list): """Called when table is added. Can modify trigger args. """ pass def reset(self): """Called before starting to process a batch. Should clean any pending data. """ pass def prepare_batch(self, batch_info, dst_curs): """Called on first event for this table in current batch.""" pass def process_event(self, ev, sql_queue_func, arg): """Process a event. Event should be added to sql_queue or executed directly. """ pass def finish_batch(self, batch_info, dst_curs): """Called when batch finishes.""" pass def get_copy_condition(self, src_curs, dst_curs): """ Use if you want to filter data """ return '' def real_copy(self, src_tablename, src_curs, dst_curs, column_list): """do actual table copy and return tuple with number of bytes and rows copied """ condition = self.get_copy_condition(src_curs, dst_curs) return skytools.full_copy(src_tablename, src_curs, dst_curs, column_list, condition, dst_tablename = self.dest_table) def needs_table(self): """Does the handler need the table to exist on destination.""" return True class TableHandler(BaseHandler): """Default Londiste handler, inserts events into tables with plain SQL. Parameters: encoding=ENC - Validate and fix incoming data from encoding. Only 'utf8' is supported at the moment. ignore_truncate=BOOL - Ignore truncate event. Default: 0; Values: 0,1. 
""" handler_name = 'londiste' sql_command = { 'I': "insert into %s %s;", 'U': "update only %s set %s;", 'D': "delete from only %s where %s;", } allow_sql_event = 1 def __init__(self, table_name, args, dest_table): BaseHandler.__init__(self, table_name, args, dest_table) enc = args.get('encoding') if enc: self.encoding_validator = EncodingValidator(self.log, enc) else: self.encoding_validator = None def get_config (self): conf = BaseHandler.get_config(self) conf.ignore_truncate = self.get_arg('ignore_truncate', [0, 1], 0) return conf def process_event(self, ev, sql_queue_func, arg): row = self.parse_row_data(ev) if len(ev.type) == 1: # sql event fqname = self.fq_dest_table fmt = self.sql_command[ev.type] sql = fmt % (fqname, row) else: # urlenc event pklist = ev.type[2:].split(',') op = ev.type[0] tbl = self.dest_table if op == 'I': sql = skytools.mk_insert_sql(row, tbl, pklist) elif op == 'U': sql = skytools.mk_update_sql(row, tbl, pklist) elif op == 'D': sql = skytools.mk_delete_sql(row, tbl, pklist) sql_queue_func(sql, arg) def parse_row_data(self, ev): """Extract row data from event, with optional encoding fixes. Returns either string (sql event) or dict (urlenc event). 
""" if len(ev.type) == 1: if not self.allow_sql_event: raise Exception('SQL events not supported by this handler') if self.encoding_validator: return self.encoding_validator.validate_string(ev.data, self.table_name) return ev.data else: row = skytools.db_urldecode(ev.data) if self.encoding_validator: return self.encoding_validator.validate_dict(row, self.table_name) return row def real_copy(self, src_tablename, src_curs, dst_curs, column_list): """do actual table copy and return tuple with number of bytes and rows copied """ if self.encoding_validator: def _write_hook(obj, data): return self.encoding_validator.validate_copy(data, column_list, src_tablename) else: _write_hook = None condition = self.get_copy_condition(src_curs, dst_curs) return skytools.full_copy(src_tablename, src_curs, dst_curs, column_list, condition, dst_tablename = self.dest_table, write_hook = _write_hook) #------------------------------------------------------------------------------ # ENCODING VALIDATOR #------------------------------------------------------------------------------ class EncodingValidator: def __init__(self, log, encoding = 'utf-8', replacement = u'\ufffd'): """validates the correctness of given encoding. when data contains illegal symbols, replaces them with and logs the incident """ if encoding.lower() not in ('utf8', 'utf-8'): raise Exception('only utf8 supported') self.encoding = encoding self.log = log self.columns = None self.error_count = 0 def show_error(self, col, val, pfx, unew): if pfx: col = pfx + '.' 
+ col self.log.info('Fixed invalid UTF8 in column <%s>', col) self.log.debug('<%s>: old=%r new=%r', col, val, unew) def validate_copy(self, data, columns, pfx=""): """Validate tab-separated fields""" ok, _unicode = skytools.safe_utf8_decode(data) if ok: return data # log error vals = data.split('\t') for i, v in enumerate(vals): ok, tmp = skytools.safe_utf8_decode(v) if not ok: self.show_error(columns[i], v, pfx, tmp) # return safe data return _unicode.encode('utf8') def validate_dict(self, data, pfx=""): """validates data in dict""" for k, v in data.items(): if v: ok, u = skytools.safe_utf8_decode(v) if not ok: self.show_error(k, v, pfx, u) data[k] = u.encode('utf8') return data def validate_string(self, value, pfx=""): """validate string""" ok, u = skytools.safe_utf8_decode(value) if ok: return value _pfx = pfx and (pfx+': ') or "" self.log.info('%sFixed invalid UTF8 in string <%s>', _pfx, value) return u.encode('utf8') # # handler management # _handler_map = { 'londiste': TableHandler, } _handler_list = _handler_map.keys() def register_handler_module(modname): """Import and module and register handlers.""" try: __import__(modname) except ImportError: print "Failed to load handler module: %s" % (modname,) return m = sys.modules[modname] for h in m.__londiste_handlers__: _handler_map[h.handler_name] = h _handler_list.append(h.handler_name) def _parse_arglist(arglist): args = {} for arg in arglist or []: key, _, val = arg.partition('=') key = key.strip() if key in args: raise Exception('multiple handler arguments: %s' % key) args[key] = val.strip() return args def create_handler_string(name, arglist): handler = name if name.find('(') >= 0: raise Exception('invalid handler name: %s' % name) if arglist: args = _parse_arglist(arglist) astr = skytools.db_urlencode(args) handler = '%s(%s)' % (handler, astr) return handler def _parse_handler(hstr): """Parse result of create_handler_string().""" args = {} name = hstr pos = hstr.find('(') if pos > 0: name = hstr[ : pos] if 
hstr[-1] != ')': raise Exception('invalid handler format: %s' % hstr) astr = hstr[pos + 1 : -1] if astr: astr = astr.replace(',', '&') args = skytools.db_urldecode(astr) return (name, args) def build_handler(tblname, hstr, dest_table=None): """Parse and initialize handler. hstr is result of create_handler_string().""" hname, args = _parse_handler(hstr) # when no handler specified, use londiste hname = hname or 'londiste' klass = _handler_map[hname] if not dest_table: dest_table = tblname return klass(tblname, args, dest_table) def load_handler_modules(cf): """Load and register modules from config.""" lst = londiste.handlers.DEFAULT_HANDLERS lst += cf.getlist('handler_modules', []) for m in lst: register_handler_module(m) def show(mods): if not mods: if 0: names = _handler_map.keys() names.sort() else: names = _handler_list for n in names: kls = _handler_map[n] desc = kls.__doc__ or '' if desc: desc = desc.strip().split('\n', 1)[0] print("%s - %s" % (n, desc)) else: for n in mods: kls = _handler_map[n] desc = kls.__doc__ or '' if desc: desc = desc.strip() print("%s - %s" % (n, desc)) skytools-3.2.6/python/londiste/handlers/0000755000000000000000000000000012426435645015254 5ustar skytools-3.2.6/python/londiste/handlers/applyfn.py0000644000000000000000000000252612426435645017304 0ustar """ Send all events to a DB function. """ import skytools from londiste.handler import BaseHandler __all__ = ['ApplyFuncHandler'] class ApplyFuncHandler(BaseHandler): """Call DB function to apply event. 
Parameters: func_name=NAME - database function name func_conf=CONF - database function conf """ handler_name = 'applyfn' def prepare_batch(self, batch_info, dst_curs): self.cur_tick = batch_info['tick_id'] def process_event(self, ev, sql_queue_func, qfunc_arg): """Ignore events for this table""" fn = self.args.get('func_name') fnconf = self.args.get('func_conf', '') args = [fnconf, self.cur_tick, ev.ev_id, ev.ev_time, ev.ev_txid, ev.ev_retry, ev.ev_type, ev.ev_data, ev.ev_extra1, ev.ev_extra2, ev.ev_extra3, ev.ev_extra4] qfn = skytools.quote_fqident(fn) qargs = [skytools.quote_literal(a) for a in args] sql = "select %s(%s);" % (qfn, ', '.join(qargs)) self.log.debug('applyfn.sql: %s', sql) sql_queue_func(sql, qfunc_arg) #------------------------------------------------------------------------------ # register handler class #------------------------------------------------------------------------------ __londiste_handlers__ = [ApplyFuncHandler] skytools-3.2.6/python/londiste/handlers/multimaster.py0000644000000000000000000000260012426435645020172 0ustar #!/usr/bin/env python # encoding: utf-8 """ Handler for replica with multiple master nodes. Can only handle initial copy from one master. Add other masters with expect-sync option. NB! needs merge_on_time function to be compiled on database first. 
""" import skytools from londiste.handlers.applyfn import ApplyFuncHandler from londiste.handlers import update __all__ = ['MultimasterHandler'] class MultimasterHandler(ApplyFuncHandler): __doc__ = __doc__ handler_name = 'multimaster' def __init__(self, table_name, args, dest_table): """Init per-batch table data cache.""" conf = args.copy() # remove Multimaster args from conf for name in ['func_name','func_conf']: if name in conf: conf.pop(name) conf = skytools.db_urlencode(conf) args = update(args, {'func_name': 'merge_on_time', 'func_conf': conf}) ApplyFuncHandler.__init__(self, table_name, args, dest_table) def _check_args (self, args): pass # any arg can be passed def add(self, trigger_arg_list): """Create SKIP and BEFORE INSERT trigger""" trigger_arg_list.append('no_merge') #------------------------------------------------------------------------------ # register handler class #------------------------------------------------------------------------------ __londiste_handlers__ = [MultimasterHandler] skytools-3.2.6/python/londiste/handlers/dispatch.py0000644000000000000000000011217412426435645017433 0ustar """ == HANDLERS == * dispatch - "vanilla" dispatch handler with default args (see below) * hourly_event * hourly_batch * hourly_field * hourly_time * daily_event * daily_batch * daily_field * daily_time * monthly_event * monthly_batch * monthly_field * monthly_time * yearly_event * yearly_batch * yearly_field * yearly_time * bulk_hourly_event * bulk_hourly_batch * bulk_hourly_field * bulk_hourly_time * bulk_daily_event * bulk_daily_batch * bulk_daily_field * bulk_daily_time * bulk_monthly_event * bulk_monthly_batch * bulk_monthly_field * bulk_monthly_time * bulk_yearly_event * bulk_yearly_batch * bulk_yearly_field * bulk_yearly_time * bulk_direct - functionally identical to bulk == HANDLER ARGUMENTS == table_mode: * part - partitioned table (default) * direct - non-partitioned table * ignore - all events are ignored part_func: database function to use for 
creating partition table. default is {londiste|public}.create_partition part_mode: * batch_time - partitioned by batch creation time (default) * event_time - partitioned by event creation time * date_field - partitioned by date_field value. part_field required * current_time - partitioned by current time part_field: date_field to use for partition. Required when part_mode=date_field period: partition period, used for automatic part_name and part_template building * hour * day - default * month * year part_name: custom name template for partition table. default is None as it is built automatically. example for daily partition: %(parent)s_%(year)s_%(month)s_%(day)s template variables: * parent - parent table name * year * month * day * hour part_template: custom sql template for creating partition table. if omitted then partition function is used. template variables: * dest - destination table name. result on part_name evaluation * part - same as dest * parent - parent table name * pkey - parent table primary keys * schema_table - table name with replace: '.' -> '__'. for using in pk names etc. * part_field - date field name if table is partitioned by field * part_time - time of partition row_mode: how rows are applied to target table * plain - each event creates SQL statement to run (default) * keep_latest - change updates to DELETE + INSERT * keep_all - change updates to inserts, ignore deletes event_types: event types to process, separated by comma. Other events are ignored. default is all event types * I - inserts * U - updates * D - deletes load_mode: how data is loaded to dst database. default direct * direct - using direct sql statements (default) * bulk - using copy to temp table and then sql. method: loading method for load_mode bulk. 
defaults to 0 * 0 (correct) - inserts as COPY into table, update as COPY into temp table and single UPDATE from there delete as COPY into temp table and single DELETE from there * 1 (delete) - as 'correct', but do update as DELETE + COPY * 2 (merged) - as 'delete', but merge insert rows with update rows * 3 (insert) - COPY inserts into table, error when other events fields: field name map for using just part of the fields and rename them * '*' - all fields. default * [,..] - list of source fields to include in target * : - renaming fields list and rename syntax can be mixed: field1,field2:new_field2,field3 skip_fields: list of field names to skip table: new name of destination table. default is same as source pre_part: sql statement(s) to execute before creating partition table. Usable variables are the same as in part_template post_part: sql statement(s) to execute after creating partition table. Usable variables are the same as in part_template retention_period: how long to keep partitions around. examples: '3 months', '1 year' ignore_old_events: * 0 - handle all events in the same way (default) * 1 - ignore events coming for obsolete partitions ignore_truncate: * 0 - process truncate event (default) * 1 - ignore truncate event encoding: name of destination encoding. handler replaces all invalid encoding symbols and logs them as warnings analyze: * 0 - do not run analyze on temp tables (default) * 1 - run analyze on temp tables == NOTES == NB! londiste3 does not currently support table renaming and field mapping when creating or coping initial data to destination table. --expect-sync and --skip-truncate should be used and --create switch is to be avoided. 
""" import codecs import datetime import re import sys from functools import partial import skytools from skytools import quote_ident, quote_fqident, UsageError from skytools.dbstruct import * from skytools.utf8 import safe_utf8_decode from londiste.handler import EncodingValidator from londiste.handlers import handler_args, update from londiste.handlers.shard import ShardHandler __all__ = ['Dispatcher'] # BulkLoader load method METH_CORRECT = 0 METH_DELETE = 1 METH_MERGED = 2 METH_INSERT = 3 # BulkLoader hacks AVOID_BIZGRES_BUG = 0 USE_LONGLIVED_TEMP_TABLES = True USE_REAL_TABLE = False # mode variables (first in list is default value) TABLE_MODES = ['part', 'direct', 'ignore'] PART_MODES = ['batch_time', 'event_time', 'date_field', 'current_time'] ROW_MODES = ['plain', 'keep_all', 'keep_latest'] LOAD_MODES = ['direct', 'bulk'] PERIODS = ['day', 'month', 'year', 'hour'] METHODS = [METH_CORRECT, METH_DELETE, METH_MERGED, METH_INSERT] EVENT_TYPES = ['I', 'U', 'D'] PART_FUNC_OLD = 'public.create_partition' PART_FUNC_NEW = 'londiste.create_partition' PART_FUNC_ARGS = ['parent', 'part', 'pkeys', 'part_field', 'part_time', 'period'] RETENTION_FUNC = "londiste.drop_obsolete_partitions" #------------------------------------------------------------------------------ # LOADERS #------------------------------------------------------------------------------ class BaseLoader: def __init__(self, table, pkeys, log, conf): self.table = table self.pkeys = pkeys self.log = log self.conf = conf or {} def process(self, op, row): raise NotImplementedError() def flush(self, curs): raise NotImplementedError() class DirectLoader(BaseLoader): def __init__(self, table, pkeys, log, conf): BaseLoader.__init__(self, table, pkeys, log, conf) self.data = [] def process(self, op, row): self.data.append((op, row)) def flush(self, curs): mk_sql = {'I': skytools.mk_insert_sql, 'U': skytools.mk_update_sql, 'D': skytools.mk_delete_sql} if self.data: curs.execute("\n".join(mk_sql[op](row, self.table, 
self.pkeys) for op, row in self.data)) class BaseBulkCollectingLoader(BaseLoader): """ Collect events into I,U,D lists by pk and keep only last event with most suitable operation. For example when event has operations I,U,U keep only last U, when I,U,D, keep nothing etc If after processing the op is not in I,U or D, then ignore that event for rest """ OP_GRAPH = {None:{'U':'U', 'I':'I', 'D':'D'}, 'I':{'D':'.'}, 'U':{'D':'D'}, 'D':{'I':'U'}, '.':{'I':'I'}, } def __init__(self, table, pkeys, log, conf): BaseLoader.__init__(self, table, pkeys, log, conf) if not self.pkeys: raise Exception('non-pk tables not supported: %s' % self.table) self.pkey_ev_map = {} def process(self, op, row): """Collect rows into pk dict, keeping only last row with most suitable op""" pk_data = tuple(row[k] for k in self.pkeys) # get current op state, None if first event _op = self.pkey_ev_map.get(pk_data, (None,))[0] # find new state and store together with row data try: # get new op state using op graph # when no edge defined for old -> new op, keep old _op = self.OP_GRAPH[_op].get(op, _op) self.pkey_ev_map[pk_data] = (_op, row) # skip update to pk-only table if len(pk_data) == len(row) and _op == 'U': del self.pkey_ev_map[pk_data] except KeyError: raise Exception('unknown event type: %s' % op) def collect_data(self): """Collects list of rows into operation hashed dict """ op_map = {'I': [], 'U': [], 'D': []} for op, row in self.pkey_ev_map.itervalues(): # ignore None op events if op in op_map: op_map[op].append(row) return op_map def flush(self, curs): op_map = self.collect_data() self.bulk_flush(curs, op_map) def bulk_flush(self, curs, op_map): pass class BaseBulkTempLoader(BaseBulkCollectingLoader): """ Provide methods for operating bulk collected events with temp table """ def __init__(self, table, pkeys, log, conf): BaseBulkCollectingLoader.__init__(self, table, pkeys, log, conf) # temp table name if USE_REAL_TABLE: self.temp = self.table + "_loadertmpx" self.qtemp = 
quote_fqident(self.temp) else: self.temp = self.table.replace('.', '_') + "_loadertmp" self.qtemp = quote_ident(self.temp) # quoted table name self.qtable = quote_fqident(self.table) # all fields self.fields = None # key fields used in where part, possible to add non pk fields # (like dist keys in gp) self.keys = self.pkeys[:] def nonkeys(self): """returns fields not in keys""" return [f for f in self.fields if f not in self.keys] def logexec(self, curs, sql): """Logs and executes sql statement""" self.log.debug('exec: %s', sql) curs.execute(sql) self.log.debug('msg: %s, rows: %s', curs.statusmessage, curs.rowcount) # create sql parts def _where(self): tmpl = "%(tbl)s.%(col)s = t.%(col)s" stmt = (tmpl % {'col': quote_ident(f), 'tbl': self.qtable, } for f in self.keys) return ' and '.join(stmt) def _cols(self): return ','.join(quote_ident(f) for f in self.fields) def insert(self, curs): sql = "insert into %s (%s) select %s from %s" % ( self.qtable, self._cols(), self._cols(), self.qtemp) return self.logexec(curs, sql) def update(self, curs): qcols = [quote_ident(c) for c in self.nonkeys()] # no point to update pk-only table if not qcols: return tmpl = "%s = t.%s" eqlist = [tmpl % (c,c) for c in qcols] _set = ", ".join(eqlist) sql = "update only %s set %s from %s as t where %s" % ( self.qtable, _set, self.qtemp, self._where()) return self.logexec(curs, sql) def delete(self, curs): sql = "delete from only %s using %s as t where %s" % ( self.qtable, self.qtemp, self._where()) return self.logexec(curs, sql) def truncate(self, curs): return self.logexec(curs, "truncate %s" % self.qtemp) def drop(self, curs): return self.logexec(curs, "drop table %s" % self.qtemp) def create(self, curs): if USE_REAL_TABLE: tmpl = "create table %s (like %s)" else: tmpl = "create temp table %s (like %s) on commit preserve rows" return self.logexec(curs, tmpl % (self.qtemp, self.qtable)) def analyze(self, curs): return self.logexec(curs, "analyze %s" % self.qtemp) def process(self, op, row): 
BaseBulkCollectingLoader.process(self, op, row) # TODO: maybe one assignment is enough? self.fields = row.keys() class BulkLoader(BaseBulkTempLoader): """ Collects events to and loads bulk data using copy and temp tables """ def __init__(self, table, pkeys, log, conf): BaseBulkTempLoader.__init__(self, table, pkeys, log, conf) self.method = self.conf['method'] self.run_analyze = self.conf['analyze'] self.dist_fields = None # is temp table created self.temp_present = False def process(self, op, row): if self.method == METH_INSERT and op != 'I': raise Exception('%s not supported by method insert' % op) BaseBulkTempLoader.process(self, op, row) def process_delete(self, curs, op_map): """Process delete list""" data = op_map['D'] cnt = len(data) if (cnt == 0): return self.log.debug("bulk: Deleting %d rows from %s", cnt, self.table) # copy rows to temp self.bulk_insert(curs, data) # delete rows using temp self.delete(curs) # check if right amount of rows deleted (only in direct mode) if self.conf.table_mode == 'direct' and cnt != curs.rowcount: self.log.warning("%s: Delete mismatch: expected=%s deleted=%d", self.table, cnt, curs.rowcount) def process_update(self, curs, op_map): """Process update list""" data = op_map['U'] # original update list count real_cnt = len(data) # merged method loads inserts together with updates if self.method == METH_MERGED: data += op_map['I'] cnt = len(data) if (cnt == 0): return self.log.debug("bulk: Updating %d rows in %s", cnt, self.table) # copy rows to temp self.bulk_insert(curs, data) if self.method == METH_CORRECT: # update main table from temp self.update(curs) # check count (only in direct mode) if self.conf.table_mode == 'direct' and cnt != curs.rowcount: self.log.warning("%s: Update mismatch: expected=%s updated=%d", self.table, cnt, curs.rowcount) else: # delete from main table using temp self.delete(curs) # check count (only in direct mode) if self.conf.table_mode == 'direct' and real_cnt != curs.rowcount: self.log.warning("%s: 
Update mismatch: expected=%s deleted=%d", self.table, real_cnt, curs.rowcount) # insert into main table if AVOID_BIZGRES_BUG: # copy again, into main table self.bulk_insert(curs, data, table = self.qtable) else: # insert from temp - better way, but does not work # due bizgres bug self.insert(curs) def process_insert(self, curs, op_map): """Process insert list""" data = op_map['I'] cnt = len(data) # merged method loads inserts together with updates if (cnt == 0) or (self.method == METH_MERGED): return self.log.debug("bulk: Inserting %d rows into %s", cnt, self.table) # copy into target table (no temp used) self.bulk_insert(curs, data, table = self.qtable) def bulk_flush(self, curs, op_map): self.log.debug("bulk_flush: %s (I/U/D = %d/%d/%d)", self.table, len(op_map['I']), len(op_map['U']), len(op_map['D'])) # fetch distribution fields if self.dist_fields is None: self.dist_fields = self.find_dist_fields(curs) self.log.debug("Key fields: %s Dist fields: %s", ",".join(self.pkeys), ",".join(self.dist_fields)) # add them to key for key in self.dist_fields: if key not in self.keys: self.keys.append(key) # check if temp table present self.check_temp(curs) # process I,U,D self.process_delete(curs, op_map) self.process_update(curs, op_map) self.process_insert(curs, op_map) # truncate or drop temp table self.clean_temp(curs) def check_temp(self, curs): if USE_REAL_TABLE: self.temp_present = skytools.exists_table(curs, self.temp) else: self.temp_present = skytools.exists_temp_table(curs, self.temp) def clean_temp(self, curs): # delete remaining rows if self.temp_present: if USE_LONGLIVED_TEMP_TABLES or USE_REAL_TABLE: self.truncate(curs) else: # fscking problems with long-lived temp tables self.drop(curs) def create_temp(self, curs): """ check if temp table exists. 
Returns False if using existing temp table and True if creating new """ if USE_LONGLIVED_TEMP_TABLES or USE_REAL_TABLE: if self.temp_present: self.log.debug("bulk: Using existing temp table %s", self.temp) return False self.create(curs) self.temp_present = True return True def bulk_insert(self, curs, data, table = None): """Copy data to table. If table not provided, use temp table. When re-using existing temp table, it is always truncated first and analyzed after copy. """ if not data: return _use_temp = table is None # if table not specified use temp if _use_temp: table = self.temp # truncate when re-using existing table if not self.create_temp(curs): self.truncate(curs) self.log.debug("bulk: COPY %d rows into %s", len(data), table) skytools.magic_insert(curs, table, data, self.fields, quoted_table = True) if _use_temp and self.run_analyze: self.analyze(curs) def find_dist_fields(self, curs): """Find GP distribution keys""" if not skytools.exists_table(curs, "pg_catalog.gp_distribution_policy"): return [] schema, name = skytools.fq_name_parts(self.table) qry = "select a.attname"\ " from pg_class t, pg_namespace n, pg_attribute a,"\ " gp_distribution_policy p"\ " where n.oid = t.relnamespace"\ " and p.localoid = t.oid"\ " and a.attrelid = t.oid"\ " and a.attnum = any(p.attrnums)"\ " and n.nspname = %s and t.relname = %s" curs.execute(qry, [schema, name]) res = [] for row in curs.fetchall(): res.append(row[0]) return res LOADERS = {'direct': DirectLoader, 'bulk': BulkLoader} #------------------------------------------------------------------------------ # ROW HANDLERS #------------------------------------------------------------------------------ class RowHandler: def __init__(self, log): self.log = log self.table_map = {} def add_table(self, table, ldr_cls, pkeys, args): self.table_map[table] = ldr_cls(table, pkeys, self.log, args) def process(self, table, op, row): try: self.table_map[table].process(op, row) except KeyError: raise Exception("No loader for table 
%s" % table) def flush(self, curs): for ldr in self.table_map.values(): ldr.flush(curs) class KeepAllRowHandler(RowHandler): def process(self, table, op, row): """Keep all row versions. Updates are changed to inserts, deletes are ignored. Makes sense only for partitioned tables. """ if op == 'U': op = 'I' elif op == 'D': return RowHandler.process(self, table, op, row) class KeepLatestRowHandler(RowHandler): def process(self, table, op, row): """Keep latest row version. Updates are changed to delete + insert Makes sense only for partitioned tables. """ if op == 'U': RowHandler.process(self, table, 'D', row) RowHandler.process(self, table, 'I', row) elif op == 'I': RowHandler.process(self, table, 'I', row) elif op == 'D': RowHandler.process(self, table, 'D', row) ROW_HANDLERS = {'plain': RowHandler, 'keep_all': KeepAllRowHandler, 'keep_latest': KeepLatestRowHandler} #------------------------------------------------------------------------------ # DISPATCHER #------------------------------------------------------------------------------ class Dispatcher (ShardHandler): """Partitioned loader. Splits events into partitions, if requested. Then applies them without further processing. 
""" handler_name = 'dispatch' def __init__(self, table_name, args, dest_table): # compat for dest-table dest_table = args.get('table', dest_table) ShardHandler.__init__(self, table_name, args, dest_table) # show args self.log.debug("dispatch.init: table_name=%r, args=%r", table_name, args) self.ignored_tables = set() self.batch_info = None self.dst_curs = None self.pkeys = None # config hdlr_cls = ROW_HANDLERS[self.conf.row_mode] self.row_handler = hdlr_cls(self.log) def _parse_args_from_doc (self): doc = __doc__ params_descr = [] params_found = False for line in doc.splitlines(): ln = line.strip() if params_found: if ln.startswith("=="): break m = re.match ("^(\w+):$", ln) if m: name = m.group(1) expr = text = "" elif not params_descr: continue else: name, expr, text = params_descr.pop() text += ln + "\n" params_descr.append ((name, expr, text)) elif ln == "== HANDLER ARGUMENTS ==": params_found = True return params_descr def get_config(self): """Processes args dict""" conf = ShardHandler.get_config(self) # set table mode conf.table_mode = self.get_arg('table_mode', TABLE_MODES) conf.analyze = self.get_arg('analyze', [0, 1]) if conf.table_mode == 'part': conf.part_mode = self.get_arg('part_mode', PART_MODES) conf.part_field = self.args.get('part_field') if conf.part_mode == 'date_field' and not conf.part_field : raise Exception('part_mode date_field requires part_field!') conf.period = self.get_arg('period', PERIODS) conf.part_name = self.args.get('part_name') conf.part_template = self.args.get('part_template') conf.pre_part = self.args.get('pre_part') conf.post_part = self.args.get('post_part') conf.part_func = self.args.get('part_func', PART_FUNC_NEW) conf.retention_period = self.args.get('retention_period') conf.ignore_old_events = self.get_arg('ignore_old_events', [0, 1], 0) # set row mode and event types to process conf.row_mode = self.get_arg('row_mode', ROW_MODES) event_types = self.args.get('event_types', '*') if event_types == '*': event_types = 
EVENT_TYPES else: event_types = [evt.upper() for evt in event_types.split(',')] for evt in event_types: if evt not in EVENT_TYPES: raise Exception('Unsupported operation: %s' % evt) conf.event_types = event_types # set load handler conf.load_mode = self.get_arg('load_mode', LOAD_MODES) conf.method = self.get_arg('method', METHODS) # fields to skip conf.skip_fields = [f.strip().lower() for f in self.args.get('skip_fields','').split(',')] # get fields map (obsolete, for compatibility reasons) fields = self.args.get('fields', '*') if fields == "*": conf.field_map = None else: conf.field_map = {} for fval in fields.split(','): tmp = fval.split(':') if len(tmp) == 1: conf.field_map[tmp[0]] = tmp[0] else: conf.field_map[tmp[0]] = tmp[1] return conf def _validate_hash_key(self): pass # no need for hash key when not sharding def reset(self): """Called before starting to process a batch. Should clean any pending data.""" ShardHandler.reset(self) def prepare_batch(self, batch_info, dst_curs): """Called on first event for this table in current batch.""" if self.conf.table_mode != 'ignore': self.batch_info = batch_info self.dst_curs = dst_curs ShardHandler.prepare_batch(self, batch_info, dst_curs) def filter_data(self, data): """Process with fields skip and map""" fskip = self.conf.skip_fields fmap = self.conf.field_map if fskip: data = dict((k, v) for k, v in data.items() if k not in fskip) if fmap: # when field name not present in source is used then None (NULL) # value is inserted. is it ok? data = dict( (v, data.get(k)) for k, v in fmap.items()) return data def filter_pkeys(self, pkeys): """Process with fields skip and map""" fskip = self.conf.skip_fields fmap = self.conf.field_map if fskip: pkeys = [f for f in pkeys if f not in fskip] if fmap: pkeys = [fmap[p] for p in pkeys if p in fmap] return pkeys def _process_event(self, ev, sql_queue_func, arg): """Process a event. Event should be added to sql_queue or executed directly. 
""" if self.conf.table_mode == 'ignore': return # get data data = skytools.db_urldecode(ev.data) if self.encoding_validator: data = self.encoding_validator.validate_dict(data, self.table_name) if len(ev.ev_type) < 2 or ev.ev_type[1] != ':': raise Exception('Unsupported event type: %s/extra1=%s/data=%s' % ( ev.ev_type, ev.ev_extra1, ev.ev_data)) op, pkeys = ev.type.split(':', 1) if op not in 'IUD': raise Exception('Unknown event type: %s' % ev.ev_type) # process only operations specified if not op in self.conf.event_types: #self.log.debug('dispatch.process_event: ignored event type') return if self.pkeys is None: self.pkeys = self.filter_pkeys(pkeys.split(',')) data = self.filter_data(data) # prepare split table when needed if self.conf.table_mode == 'part': dst, part_time = self.split_format(ev, data) if dst in self.ignored_tables: return if dst not in self.row_handler.table_map: self.check_part(dst, part_time) if dst in self.ignored_tables: return else: dst = self.dest_table if dst not in self.row_handler.table_map: self.row_handler.add_table(dst, LOADERS[self.conf.load_mode], self.pkeys, self.conf) self.row_handler.process(dst, op, data) def finish_batch(self, batch_info, dst_curs): """Called when batch finishes.""" if self.conf.table_mode != 'ignore': self.row_handler.flush(dst_curs) #ShardHandler.finish_batch(self, batch_info, dst_curs) def get_part_name(self): # if custom part name template given, use it if self.conf.part_name: return self.conf.part_name parts = ['year', 'month', 'day', 'hour'] name_parts = ['parent'] + parts[:parts.index(self.conf.period)+1] return '_'.join('%%(%s)s' % part for part in name_parts) def split_format(self, ev, data): """Generates part table name from template""" if self.conf.part_mode == 'batch_time': dtm = self.batch_info['batch_end'] elif self.conf.part_mode == 'event_time': dtm = ev.ev_time elif self.conf.part_mode == 'current_time': dtm = datetime.datetime.now() elif self.conf.part_mode == 'date_field': dt_str = 
data[self.conf.part_field] if dt_str is None: raise Exception('part_field(%s) is NULL: %s' % (self.conf.part_field, ev)) dtm = datetime.datetime.strptime(dt_str[:19], "%Y-%m-%d %H:%M:%S") else: raise UsageError('Bad value for part_mode: %s' %\ self.conf.part_mode) vals = {'parent': self.dest_table, 'year': "%04d" % dtm.year, 'month': "%02d" % dtm.month, 'day': "%02d" % dtm.day, 'hour': "%02d" % dtm.hour, } return (self.get_part_name() % vals, dtm) def check_part(self, dst, part_time): """Create part table if not exists. It part_template present, execute it else if part function present in db, call it else clone master table""" curs = self.dst_curs if (self.conf.ignore_old_events and self.conf.retention_period and self.is_obsolete_partition (dst, self.conf.retention_period, self.conf.period)): self.ignored_tables.add(dst) return if skytools.exists_table(curs, dst): return dst = quote_fqident(dst) vals = {'dest': dst, 'part': dst, 'parent': self.fq_dest_table, 'pkeys': ",".join(self.pkeys), # quoting? 
# we do this to make sure that constraints for # tables who contain a schema will still work 'schema_table': dst.replace(".", "__"), 'part_field': self.conf.part_field, 'part_time': part_time, 'period': self.conf.period, } def exec_with_vals(tmpl): if tmpl: sql = tmpl % vals curs.execute(sql) return True return False exec_with_vals(self.conf.pre_part) if not exec_with_vals(self.conf.part_template): self.log.debug('part_template not provided, using part func') # if part func exists call it with val arguments pfargs = ', '.join('%%(%s)s' % arg for arg in PART_FUNC_ARGS) # set up configured function pfcall = 'select %s(%s)' % (self.conf.part_func, pfargs) have_func = skytools.exists_function(curs, self.conf.part_func, len(PART_FUNC_ARGS)) # backwards compat if not have_func and self.conf.part_func == PART_FUNC_NEW: pfcall = 'select %s(%s)' % (PART_FUNC_OLD, pfargs) have_func = skytools.exists_function(curs, PART_FUNC_OLD, len(PART_FUNC_ARGS)) if have_func: self.log.debug('check_part.exec: func: %s, args: %s', pfcall, vals) curs.execute(pfcall, vals) else: # # Otherwise create simple clone. # # FixMe: differences from create_partitions(): # - check constraints # - inheritance # self.log.debug('part func %s not found, cloning table', self.conf.part_func) struct = TableStruct(curs, self.dest_table) struct.create(curs, T_ALL, dst) exec_with_vals(self.conf.post_part) self.log.info("Created table: %s", dst) if self.conf.retention_period: dropped = self.drop_obsolete_partitions (self.dest_table, self.conf.retention_period, self.conf.period) if self.conf.ignore_old_events and dropped: for tbl in dropped: self.ignored_tables.add(tbl) if tbl in self.row_handler.table_map: del self.row_handler.table_map[tbl] def drop_obsolete_partitions (self, parent_table, retention_period, partition_period): """ Drop obsolete partitions of partition-by-date parent table. 
""" curs = self.dst_curs func = RETENTION_FUNC args = [parent_table, retention_period, partition_period] sql = "select " + func + " (%s, %s, %s)" self.log.debug("func: %s, args: %s", func, args) curs.execute(sql, args) res = [row[0] for row in curs.fetchall()] if res: self.log.info("Dropped tables: %s", ", ".join(res)) return res def is_obsolete_partition (self, partition_table, retention_period, partition_period): """ Test partition name of partition-by-date parent table. """ curs = self.dst_curs func = "londiste.is_obsolete_partition" args = [partition_table, retention_period, partition_period] sql = "select " + func + " (%s, %s, %s)" self.log.debug("func: %s, args: %s", func, args) curs.execute(sql, args) res = curs.fetchone()[0] if res: self.log.info("Ignored table: %s", partition_table) return res def get_copy_condition(self, src_curs, dst_curs): """ Prepare where condition for copy and replay filtering. """ return ShardHandler.get_copy_condition(self, src_curs, dst_curs) def real_copy(self, tablename, src_curs, dst_curs, column_list): """do actual table copy and return tuple with number of bytes and rows copied """ _src_cols = _dst_cols = column_list condition = self.get_copy_condition (src_curs, dst_curs) if self.conf.skip_fields: _src_cols = [col for col in column_list if col not in self.conf.skip_fields] _dst_cols = _src_cols if self.conf.field_map: _src_cols = [col for col in _src_cols if col in self.conf.field_map] _dst_cols = [self.conf.field_map[col] for col in _src_cols] if self.encoding_validator: def _write_hook(obj, data): return self.encoding_validator.validate_copy(data, _src_cols, tablename) else: _write_hook = None return skytools.full_copy(tablename, src_curs, dst_curs, _src_cols, condition, dst_tablename = self.dest_table, dst_column_list = _dst_cols, write_hook = _write_hook) # add arguments' description to handler's docstring found = False for line in __doc__.splitlines(): if line.startswith ("== HANDLER ARGUMENTS =="): found = True if 
found: Dispatcher.__doc__ += "\n" + line del found #------------------------------------------------------------------------------ # register handler class #------------------------------------------------------------------------------ __londiste_handlers__ = [Dispatcher] #------------------------------------------------------------------------------ # helper function for creating dispatchers with different default values #------------------------------------------------------------------------------ handler_args = partial(handler_args, cls=Dispatcher) #------------------------------------------------------------------------------ # build set of handlers with different default values for easier use #------------------------------------------------------------------------------ LOAD = { '': { 'load_mode': 'direct' }, 'bulk': { 'load_mode': 'bulk' } } PERIOD = { 'hourly': { 'period': 'hour' }, 'daily' : { 'period': 'day' }, 'monthly': { 'period': 'month' }, 'yearly': { 'period': 'year' }, } MODE = { 'event': { 'part_mode': 'event_time' }, 'batch': { 'part_mode': 'batch_time' }, 'field': { 'part_mode': 'date_field' }, 'time': { 'part_mode': 'current_time' }, } BASE = { 'table_mode': 'part', 'row_mode': 'keep_latest', } def set_handler_doc (cls, defs): """ generate handler docstring """ cls.__doc__ = "Custom dispatch handler with default args.\n\n" \ "Parameters:\n" for k,v in defs.items(): cls.__doc__ += " %s = %s\n" % (k,v) for load, load_dict in LOAD.items(): for period, period_dict in PERIOD.items(): for mode, mode_dict in MODE.items(): # define creator func to keep default dicts in separate context def create_handler(): handler_name = '_'.join(p for p in (load, period, mode) if p) default = update(mode_dict, period_dict, load_dict, BASE) @handler_args(handler_name) def handler_func(args): return update(args, default) create_handler() hcls = __londiste_handlers__[-1] # it was just added defs = update(mode_dict, period_dict, load_dict, BASE) set_handler_doc (hcls, 
defs) del (hcls, defs) @handler_args('bulk_direct') def bulk_direct_handler(args): return update(args, {'load_mode': 'bulk', 'table_mode': 'direct'}) set_handler_doc (__londiste_handlers__[-1], {'load_mode': 'bulk', 'table_mode': 'direct'}) @handler_args('direct') def direct_handler(args): return update(args, {'load_mode': 'direct', 'table_mode': 'direct'}) set_handler_doc (__londiste_handlers__[-1], {'load_mode': 'direct', 'table_mode': 'direct'}) skytools-3.2.6/python/londiste/handlers/vtable.py0000644000000000000000000000114312426435645017102 0ustar """Virtual Table handler. Hack to get local=t for a table, but without processing any events. """ from londiste.handler import BaseHandler __all__ = ['VirtualTableHandler', 'FakeLocalHandler'] class VirtualTableHandler(BaseHandler): __doc__ = __doc__ handler_name = 'vtable' def add(self, trigger_arg_list): trigger_arg_list.append('virtual_table') def needs_table(self): return False class FakeLocalHandler(VirtualTableHandler): """Deprecated compat name for vtable.""" handler_name = 'fake_local' __londiste_handlers__ = [VirtualTableHandler, FakeLocalHandler] skytools-3.2.6/python/londiste/handlers/bulk.py0000644000000000000000000003230512426435645016566 0ustar """ Bulk loading into OLAP database. To use set in londiste.ini: handler_modules = londiste.handlers.bulk then add table with: londiste3 add-table xx --handler="bulk" or: londiste3 add-table xx --handler="bulk(method=X)" Methods: 0 (correct) - inserts as COPY into table, update as COPY into temp table and single UPDATE from there delete as COPY into temp table and single DELETE from there 1 (delete) - as 'correct', but do update as DELETE + COPY 2 (merged) - as 'delete', but merge insert rows with update rows Default is 0. 
""" import skytools from londiste.handler import BaseHandler, RowCache from skytools import quote_ident, quote_fqident __all__ = ['BulkLoader'] # BulkLoader load method METH_CORRECT = 0 METH_DELETE = 1 METH_MERGED = 2 DEFAULT_METHOD = METH_CORRECT # BulkLoader hacks AVOID_BIZGRES_BUG = 0 USE_LONGLIVED_TEMP_TABLES = True USE_REAL_TABLE = False class BulkEvent(object): """Helper class for BulkLoader to store relevant data.""" __slots__ = ('op', 'data', 'pk_data') def __init__(self, op, data, pk_data): self.op = op self.data = data self.pk_data = pk_data class BulkLoader(BaseHandler): """Bulk loading into OLAP database. Instead of statement-per-event, load all data with one big COPY, UPDATE or DELETE statement. Parameters: method=TYPE - method to use for copying [0..2] (default: 0) Methods: 0 (correct) - inserts as COPY into table, update as COPY into temp table and single UPDATE from there delete as COPY into temp table and single DELETE from there 1 (delete) - as 'correct', but do update as DELETE + COPY 2 (merged) - as 'delete', but merge insert rows with update rows """ handler_name = 'bulk' fake_seq = 0 def __init__(self, table_name, args, dest_table): """Init per-batch table data cache.""" BaseHandler.__init__(self, table_name, args, dest_table) self.pkey_list = None self.dist_fields = None self.col_list = None self.pkey_ev_map = {} self.method = int(args.get('method', DEFAULT_METHOD)) if not self.method in (0,1,2): raise Exception('unknown method: %s' % self.method) self.log.debug('bulk_init(%r), method=%d', args, self.method) def reset(self): self.pkey_ev_map = {} BaseHandler.reset(self) def finish_batch(self, batch_info, dst_curs): self.bulk_flush(dst_curs) def process_event(self, ev, sql_queue_func, arg): if len(ev.ev_type) < 2 or ev.ev_type[1] != ':': raise Exception('Unsupported event type: %s/extra1=%s/data=%s' % ( ev.ev_type, ev.ev_extra1, ev.ev_data)) op = ev.ev_type[0] if op not in 'IUD': raise Exception('Unknown event type: '+ev.ev_type) # pkey_list = 
ev.ev_type[2:].split(',') data = skytools.db_urldecode(ev.ev_data) # get pkey value if self.pkey_list is None: #self.pkey_list = pkey_list self.pkey_list = ev.ev_type[2:].split(',') if len(self.pkey_list) > 0: pk_data = tuple(data[k] for k in self.pkey_list) elif op == 'I': # fake pkey, just to get them spread out pk_data = self.fake_seq self.fake_seq += 1 else: raise Exception('non-pk tables not supported: %s' % self.table_name) # get full column list, detect added columns if not self.col_list: self.col_list = data.keys() elif self.col_list != data.keys(): # ^ supposedly python guarantees same order in keys() self.col_list = data.keys() # keep all versions of row data ev = BulkEvent(op, data, pk_data) if ev.pk_data in self.pkey_ev_map: self.pkey_ev_map[ev.pk_data].append(ev) else: self.pkey_ev_map[ev.pk_data] = [ev] def prepare_data(self): """Got all data, prepare for insertion.""" del_list = [] ins_list = [] upd_list = [] for ev_list in self.pkey_ev_map.itervalues(): # rewrite list of I/U/D events to # optional DELETE and optional INSERT/COPY command exists_before = -1 exists_after = 1 for ev in ev_list: if ev.op == "I": if exists_before < 0: exists_before = 0 exists_after = 1 elif ev.op == "U": if exists_before < 0: exists_before = 1 #exists_after = 1 # this shouldnt be needed elif ev.op == "D": if exists_before < 0: exists_before = 1 exists_after = 0 else: raise Exception('unknown event type: %s' % ev.op) # skip short-lived rows if exists_before == 0 and exists_after == 0: continue # take last event ev = ev_list[-1] # generate needed commands if exists_before and exists_after: upd_list.append(ev.data) elif exists_before: del_list.append(ev.data) elif exists_after: ins_list.append(ev.data) return ins_list, upd_list, del_list def bulk_flush(self, curs): ins_list, upd_list, del_list = self.prepare_data() # reorder cols, put pks first col_list = self.pkey_list[:] for k in self.col_list: if k not in self.pkey_list: col_list.append(k) real_update_count = 
len(upd_list) self.log.debug("bulk_flush: %s (I/U/D = %d/%d/%d)", self.table_name, len(ins_list), len(upd_list), len(del_list)) # hack to unbroke stuff if self.method == METH_MERGED: upd_list += ins_list ins_list = [] # fetch distribution fields if self.dist_fields is None: self.dist_fields = self.find_dist_fields(curs) key_fields = self.pkey_list[:] for fld in self.dist_fields: if fld not in key_fields: key_fields.append(fld) self.log.debug("PKey fields: %s Dist fields: %s", ",".join(self.pkey_list), ",".join(self.dist_fields)) # create temp table temp, qtemp = self.create_temp_table(curs) tbl = self.dest_table qtbl = self.fq_dest_table # where expr must have pkey and dist fields klist = [] for pk in key_fields: exp = "%s.%s = %s.%s" % (qtbl, quote_ident(pk), qtemp, quote_ident(pk)) klist.append(exp) whe_expr = " and ".join(klist) # create del sql del_sql = "delete from only %s using %s where %s" % (qtbl, qtemp, whe_expr) # create update sql slist = [] for col in col_list: if col not in key_fields: exp = "%s = %s.%s" % (quote_ident(col), qtemp, quote_ident(col)) slist.append(exp) upd_sql = "update only %s set %s from %s where %s" % ( qtbl, ", ".join(slist), qtemp, whe_expr) # avoid updates on pk-only table if not slist: upd_list = [] # insert sql colstr = ",".join([quote_ident(c) for c in col_list]) ins_sql = "insert into %s (%s) select %s from %s" % ( qtbl, colstr, colstr, qtemp) temp_used = False # process deleted rows if len(del_list) > 0: self.log.debug("bulk: Deleting %d rows from %s", len(del_list), tbl) # delete old rows q = "truncate %s" % qtemp self.log.debug('bulk: %s', q) curs.execute(q) # copy rows self.log.debug("bulk: COPY %d rows into %s", len(del_list), temp) skytools.magic_insert(curs, qtemp, del_list, col_list, quoted_table=1) # delete rows self.log.debug('bulk: %s', del_sql) curs.execute(del_sql) self.log.debug("bulk: %s - %d", curs.statusmessage, curs.rowcount) if len(del_list) != curs.rowcount: self.log.warning("Delete mismatch: expected=%s 
deleted=%d", len(del_list), curs.rowcount) temp_used = True # process updated rows if len(upd_list) > 0: self.log.debug("bulk: Updating %d rows in %s", len(upd_list), tbl) # delete old rows q = "truncate %s" % qtemp self.log.debug('bulk: %s', q) curs.execute(q) # copy rows self.log.debug("bulk: COPY %d rows into %s", len(upd_list), temp) skytools.magic_insert(curs, qtemp, upd_list, col_list, quoted_table=1) temp_used = True if self.method == METH_CORRECT: # update main table self.log.debug('bulk: %s', upd_sql) curs.execute(upd_sql) self.log.debug("bulk: %s - %d", curs.statusmessage, curs.rowcount) # check count if len(upd_list) != curs.rowcount: self.log.warning("Update mismatch: expected=%s updated=%d", len(upd_list), curs.rowcount) else: # delete from main table self.log.debug('bulk: %s', del_sql) curs.execute(del_sql) self.log.debug('bulk: %s', curs.statusmessage) # check count if real_update_count != curs.rowcount: self.log.warning("bulk: Update mismatch: expected=%s deleted=%d", real_update_count, curs.rowcount) # insert into main table if AVOID_BIZGRES_BUG: # copy again, into main table self.log.debug("bulk: COPY %d rows into %s", len(upd_list), tbl) skytools.magic_insert(curs, qtbl, upd_list, col_list, quoted_table=1) else: # better way, but does not work due bizgres bug self.log.debug('bulk: %s', ins_sql) curs.execute(ins_sql) self.log.debug('bulk: %s', curs.statusmessage) # process new rows if len(ins_list) > 0: self.log.debug("bulk: Inserting %d rows into %s", len(ins_list), tbl) self.log.debug("bulk: COPY %d rows into %s", len(ins_list), tbl) skytools.magic_insert(curs, qtbl, ins_list, col_list, quoted_table=1) # delete remaining rows if temp_used: if USE_LONGLIVED_TEMP_TABLES or USE_REAL_TABLE: q = "truncate %s" % qtemp else: # fscking problems with long-lived temp tables q = "drop table %s" % qtemp self.log.debug('bulk: %s', q) curs.execute(q) self.reset() def create_temp_table(self, curs): if USE_REAL_TABLE: tempname = self.dest_table + "_loadertmpx" 
else: # create temp table for loading tempname = self.dest_table.replace('.', '_') + "_loadertmp" # check if exists if USE_REAL_TABLE: if skytools.exists_table(curs, tempname): self.log.debug("bulk: Using existing real table %s", tempname) return tempname, quote_fqident(tempname) # create non-temp table q = "create table %s (like %s)" % ( quote_fqident(tempname), quote_fqident(self.dest_table)) self.log.debug("bulk: Creating real table: %s", q) curs.execute(q) return tempname, quote_fqident(tempname) elif USE_LONGLIVED_TEMP_TABLES: if skytools.exists_temp_table(curs, tempname): self.log.debug("bulk: Using existing temp table %s", tempname) return tempname, quote_ident(tempname) # bizgres crashes on delete rows # removed arg = "on commit delete rows" arg = "on commit preserve rows" # create temp table for loading q = "create temp table %s (like %s) %s" % ( quote_ident(tempname), quote_fqident(self.dest_table), arg) self.log.debug("bulk: Creating temp table: %s", q) curs.execute(q) return tempname, quote_ident(tempname) def find_dist_fields(self, curs): if not skytools.exists_table(curs, "pg_catalog.gp_distribution_policy"): return [] schema, name = skytools.fq_name_parts(self.dest_table) q = "select a.attname"\ " from pg_class t, pg_namespace n, pg_attribute a,"\ " gp_distribution_policy p"\ " where n.oid = t.relnamespace"\ " and p.localoid = t.oid"\ " and a.attrelid = t.oid"\ " and a.attnum = any(p.attrnums)"\ " and n.nspname = %s and t.relname = %s" curs.execute(q, [schema, name]) res = [] for row in curs.fetchall(): res.append(row[0]) return res # register handler class __londiste_handlers__ = [BulkLoader] skytools-3.2.6/python/londiste/handlers/qtable.py0000644000000000000000000000524412426435645017103 0ustar """ Handlers: qtable - dummy handler to setup queue tables. All events are ignored. Use in root node. fake_local - dummy handler to setup queue tables. All events are ignored. Table structure is not required. Use in branch/leaf. 
qsplitter - dummy handler to setup queue tables. All events are ignored. Table structure is not required. All table events are inserted to destination queue, specified with handler arg 'queue'. """ from londiste.handler import BaseHandler import pgq __all__ = ['QueueTableHandler', 'QueueSplitterHandler'] class QueueTableHandler(BaseHandler): """Queue table handler. Do nothing. Trigger: before-insert, skip trigger. Event-processing: do nothing. """ handler_name = 'qtable' def add(self, trigger_arg_list): """Create SKIP and BEFORE INSERT trigger""" trigger_arg_list.append('tgflags=BI') trigger_arg_list.append('SKIP') trigger_arg_list.append('expect_sync') def real_copy(self, tablename, src_curs, dst_curs, column_list): """Force copy not to start""" return (0,0) def needs_table(self): return False class QueueSplitterHandler(BaseHandler): """Send events for one table to another queue. Parameters: queue=QUEUE - Queue name. """ handler_name = 'qsplitter' def __init__(self, table_name, args, dest_table): """Init per-batch table data cache.""" BaseHandler.__init__(self, table_name, args, dest_table) try: self.dst_queue_name = args['queue'] except KeyError: raise Exception('specify queue with handler-arg') self.rows = [] def add(self, trigger_arg_list): trigger_arg_list.append('virtual_table') def prepare_batch(self, batch_info, dst_curs): """Called on first event for this table in current batch.""" self.rows = [] def process_event(self, ev, sql_queue_func, arg): """Process a event. Event should be added to sql_queue or executed directly. 
""" if self.dst_queue_name is None: return data = [ev.type, ev.data, ev.extra1, ev.extra2, ev.extra3, ev.extra4, ev.time] self.rows.append(data) def finish_batch(self, batch_info, dst_curs): """Called when batch finishes.""" if self.dst_queue_name is None: return fields = ['type', 'data', 'extra1', 'extra2', 'extra3', 'extra4', 'time'] pgq.bulk_insert_events(dst_curs, self.rows, fields, self.dst_queue_name) def needs_table(self): return False __londiste_handlers__ = [QueueTableHandler, QueueSplitterHandler] skytools-3.2.6/python/londiste/handlers/shard.py0000644000000000000000000001061212426435645016727 0ustar """Event filtering by hash, for partitioned databases. Parameters: key=COLUMN: column name to use for hashing hash_key=COLUMN: column name to use for hashing (overrides 'key' parameter) hashfunc=NAME: function to use for hashing (default: partconf.get_hash_raw) hashexpr=EXPR: full expression to use for hashing (deprecated) encoding=ENC: validate and fix incoming data (only utf8 supported atm) ignore_truncate=BOOL: ignore truncate event, default: 0, values: 0,1 On root node: * Hash of key field will be added to ev_extra3. This is implemented by adding additional trigger argument: ev_extra3='hash='||partconf.get_hash_raw(key_column) On branch/leaf node: * On COPY time, the SELECT on provider side gets filtered by hash. * On replay time, the events gets filtered by looking at hash in ev_extra3. Local config: * Local hash value and mask are loaded from partconf.conf table. 
""" import skytools from londiste.handler import TableHandler __all__ = ['ShardHandler', 'PartHandler'] class ShardHandler (TableHandler): __doc__ = __doc__ handler_name = 'shard' DEFAULT_HASHFUNC = "partconf.get_hash_raw" DEFAULT_HASHEXPR = "%s(%s)" def __init__(self, table_name, args, dest_table): TableHandler.__init__(self, table_name, args, dest_table) self.hash_mask = None # aka max part number (atm) self.shard_nr = None # part number of local node # primary key columns self.hash_key = args.get('hash_key', args.get('key')) self._validate_hash_key() # hash function & full expression hashfunc = args.get('hashfunc', self.DEFAULT_HASHFUNC) self.hashexpr = self.DEFAULT_HASHEXPR % ( skytools.quote_fqident(hashfunc), skytools.quote_ident(self.hash_key or '')) self.hashexpr = args.get('hashexpr', self.hashexpr) def _validate_hash_key(self): if self.hash_key is None: raise Exception('Specify hash key field as hash_key argument') def reset(self): """Forget config info.""" self.hash_mask = None self.shard_nr = None TableHandler.reset(self) def add(self, trigger_arg_list): """Let trigger put hash into extra3""" arg = "ev_extra3='hash='||%s" % self.hashexpr trigger_arg_list.append(arg) TableHandler.add(self, trigger_arg_list) def prepare_batch(self, batch_info, dst_curs): """Called on first event for this table in current batch.""" if self.hash_key is not None: if not self.hash_mask: self.load_shard_info(dst_curs) TableHandler.prepare_batch(self, batch_info, dst_curs) def process_event(self, ev, sql_queue_func, arg): """Filter event by hash in extra3, apply only if for local shard.""" if ev.extra3 and self.hash_key is not None: meta = skytools.db_urldecode(ev.extra3) self.log.debug('shard.process_event: hash=%i, hash_mask=%i, shard_nr=%i', int(meta['hash']), self.hash_mask, self.shard_nr) if (int(meta['hash']) & self.hash_mask) != self.shard_nr: self.log.debug('shard.process_event: not my event') return self._process_event(ev, sql_queue_func, arg) def _process_event(self, 
def handler_args(name, cls):
    """Handler arguments initialization decorator

    Define successor for handler class cls with func as argument generator.

    The decorated func receives a copy of the args dict and returns the
    (possibly modified) dict that is passed on to cls.__init__.  A new
    handler class named '<ClsName>_<name with dots replaced>' is created,
    registered in the defining module's __londiste_handlers__ and __all__,
    and the original func is returned unchanged.
    """
    def wrapper(func):
        def _init_override(self, table_name, args, dest_table):
            # pass a copy so the generator cannot mutate caller's dict
            cls.__init__(self, table_name, func(args.copy()), dest_table)
        dct = {'__init__': _init_override, 'handler_name': name}
        module = sys.modules[cls.__module__]
        newname = '%s_%s' % (cls.__name__, name.replace('.', '_'))
        # fix: the 'new' module (new.classobj) is deprecated and removed in py3;
        # type(cls)(...) picks the same metaclass new.classobj would resolve to,
        # so classic and new-style base classes both keep working.
        newcls = type(cls)(newname, (cls,), dct)
        setattr(module, newname, newcls)
        module.__londiste_handlers__.append(newcls)
        module.__all__.append(newname)
        return func
    return wrapper
def update(*p):
    """Update dicts given in params with its predecessor param dict
    in reverse order

    The first dict has the highest priority: later dicts only provide
    keys that earlier ones did not set.  Returns a new dict; arguments
    are not modified.
    """
    # fix: explicit loop instead of bare reduce() -- identical result,
    # but works on py3 too (the reduce builtin was removed there)
    res = {}
    for d in reversed(p):
        res.update(d)
    return res
pgq_node.get_node_info(%s)" res = self.exec_cmd(setup_curs, q, [self.queue_name]) pnode = res[0] self.log.info('Provider: %s (%s)', pnode['node_name'], pnode['node_type']) return pnode def check_consumer(self, setup_db, dst_db): """Before locking anything check if consumer is working ok.""" setup_curs = setup_db.cursor() dst_curs = dst_db.cursor() c = 0 while 1: q = "select * from pgq_node.get_consumer_state(%s, %s)" res = self.exec_cmd(dst_db, q, [self.queue_name, self.consumer_name]) completed_tick = res[0]['completed_tick'] q = "select extract(epoch from ticker_lag) from pgq.get_queue_info(%s)" setup_curs.execute(q, [self.queue_name]) ticker_lag = setup_curs.fetchone()[0] q = "select extract(epoch from (now() - t.tick_time)) as lag"\ " from pgq.tick t, pgq.queue q"\ " where q.queue_name = %s"\ " and t.tick_queue = q.queue_id"\ " and t.tick_id = %s" setup_curs.execute(q, [self.queue_name, completed_tick]) res = setup_curs.fetchall() if len(res) == 0: self.log.warning('Consumer completed_tick (%d) to not exists on provider (%s), too big lag?', completed_tick, self.provider_info['node_name']) self.sleep(10) continue consumer_lag = res[0][0] if consumer_lag < ticker_lag + 5: break lag_msg = 'Consumer lag: %s, ticker_lag %s, too big difference, waiting' if c % 30 == 0: self.log.warning(lag_msg, consumer_lag, ticker_lag) else: self.log.debug(lag_msg, consumer_lag, ticker_lag) c += 1 time.sleep(1) def get_tables(self, db): """Load table info. Returns tuple of (dict(name->ATable), namelist)""" curs = db.cursor() q = "select table_name, merge_state, dest_table, table_attrs"\ " from londiste.get_table_list(%s) where local" curs.execute(q, [self.queue_name]) rows = curs.fetchall() db.commit() res = {} names = [] for row in rows: t = ATable(row) res[t.table_name] = t names.append(t.table_name) return res, names def work(self): """Syncer main function.""" # 'SELECT 1' and COPY must use same snapshot, so change isolation level. 
    def work(self):
        """Syncer main function.

        Iterates over requested (or all local) tables and syncs each one
        against its copy source node.  Exit code is the number of tables
        that did not match.
        """
        # 'SELECT 1' and COPY must use same snapshot, so change isolation level.
        dst_db = self.get_database('db', isolation_level = skytools.I_REPEATABLE_READ)
        pnode, ploc = self.get_provider_location(dst_db)

        dst_tables, names = self.get_tables(dst_db)

        # explicit table list on command line, otherwise all local tables
        if len(self.args) > 2:
            tlist = self.args[2:]
        else:
            tlist = names

        for tbl in tlist:
            tbl = skytools.fq_name(tbl)
            if not tbl in dst_tables:
                self.log.warning('Table not subscribed: %s', tbl)
                continue
            t2 = dst_tables[tbl]
            if t2.merge_state != 'ok':
                self.log.warning('Table %s not synced yet, no point', tbl)
                continue

            # find node that can serve as copy source for this table
            pnode, ploc, wname = find_copy_source(self, self.queue_name, tbl, pnode, ploc)
            self.log.info('%s: Using node %s as provider', tbl, pnode)

            if wname is None:
                wname = self.consumer_name
            self.downstream_worker_name = wname

            self.process_one_table(tbl, t2, dst_db, pnode, ploc)

        # signal caller about bad tables
        sys.exit(self.bad_tables)

    def process_one_table(self, tbl, t2, dst_db, provider_node, provider_loc):
        """Compare one table against the given provider node.

        Opens three connections to the provider (locking, autocommit setup,
        snapshot data connection), checks table availability there, then
        delegates to check_table().
        """
        lock_db = self.get_database('lock_db', connstr = provider_loc, profile = 'remote')
        setup_db = self.get_database('setup_db', autocommit = 1, connstr = provider_loc, profile = 'remote')

        src_db = self.get_database('provider_db', connstr = provider_loc, profile = 'remote',
                                   isolation_level = skytools.I_REPEATABLE_READ)

        setup_curs = setup_db.cursor()

        # provider node info
        self.provider_info = self.get_provider_info(setup_curs)

        src_tables, ignore = self.get_tables(src_db)
        if not tbl in src_tables:
            self.log.warning('Table not available on provider: %s', tbl)
            return
        t1 = src_tables[tbl]

        if t1.merge_state != 'ok':
            self.log.warning('Table %s not ready yet on provider', tbl)
            return

        #self.check_consumer(setup_db, dst_db)

        self.check_table(t1, t2, lock_db, src_db, dst_db, setup_db)
        lock_db.commit()
        src_db.commit()
        dst_db.commit()

        # drop per-provider connections so next table can use another node
        self.close_database('setup_db')
        self.close_database('lock_db')
        self.close_database('provider_db')
    def force_tick(self, setup_curs, wait=True):
        """Force a new tick on the queue.

        Returns the tick position; when wait=True, polls until the
        ticker has actually produced a new tick and returns that one.
        """
        q = "select pgq.force_tick(%s)"
        setup_curs.execute(q, [self.queue_name])
        res = setup_curs.fetchone()
        cur_pos = res[0]
        if not wait:
            return cur_pos

        start = time.time()
        while 1:
            time.sleep(0.5)
            setup_curs.execute(q, [self.queue_name])
            res = setup_curs.fetchone()
            if res[0] != cur_pos:
                # new pos
                return res[0]

            # dont loop more than 10 secs
            dur = time.time() - start
            #if dur > 10 and not self.options.force:
            #    raise Exception("Ticker seems dead")

    def check_table(self, t1, t2, lock_db, src_db, dst_db, setup_db):
        """Get transaction to same state, then process.

        Locks the provider table (root or branch strategy), takes
        matching snapshots on both sides, releases the lock, then runs
        the subclass-provided process_sync() on the snapshots.
        """
        src_tbl = t1.dest_table
        dst_tbl = t2.dest_table

        lock_curs = lock_db.cursor()
        src_curs = src_db.cursor()
        dst_curs = dst_db.cursor()

        if not skytools.exists_table(src_curs, src_tbl):
            self.log.warning("Table %s does not exist on provider side", src_tbl)
            return
        if not skytools.exists_table(dst_curs, dst_tbl):
            self.log.warning("Table %s does not exist on subscriber side", dst_tbl)
            return

        # lock table against changes
        try:
            if self.provider_info['node_type'] == 'root':
                self.lock_table_root(lock_db, setup_db, dst_db, src_tbl, dst_tbl)
            else:
                self.lock_table_branch(lock_db, setup_db, dst_db, src_tbl, dst_tbl)

            # take snapshot on provider side
            src_db.commit()
            src_curs.execute("SELECT 1")

            # take snapshot on subscriber side
            dst_db.commit()
            dst_curs.execute("SELECT 1")
        finally:
            # release lock
            if self.provider_info['node_type'] == 'root':
                self.unlock_table_root(lock_db, setup_db)
            else:
                self.unlock_table_branch(lock_db, setup_db)

        # do work
        bad = self.process_sync(t1, t2, src_db, dst_db)
        if bad:
            self.bad_tables += 1

        # done
        src_db.commit()
        dst_db.commit()
self.force_tick(setup_curs) # try to force second tick also self.force_tick(setup_curs) # now wait while 1: time.sleep(0.5) q = "select * from pgq_node.get_node_info(%s)" res = self.exec_cmd(dst_db, q, [self.queue_name]) last_tick = res[0]['worker_last_tick'] if last_tick > tick_id: break # limit lock time if time.time() > lock_time + self.lock_timeout and not self.options.force: self.log.error('Consumer lagging too much, exiting') lock_db.rollback() sys.exit(1) def unlock_table_root(self, lock_db, setup_db): lock_db.commit() def lock_table_branch(self, lock_db, setup_db, dst_db, src_tbl, dst_tbl): setup_curs = setup_db.cursor() lock_time = time.time() self.old_worker_paused = self.pause_consumer(setup_curs, self.provider_info['worker_name']) lock_curs = lock_db.cursor() self.log.info('Syncing %s', dst_tbl) # consumer must get futher than this tick tick_id = self.force_tick(setup_curs, False) # now wait while 1: time.sleep(0.5) q = "select * from pgq_node.get_node_info(%s)" res = self.exec_cmd(dst_db, q, [self.queue_name]) last_tick = res[0]['worker_last_tick'] if last_tick > tick_id: break # limit lock time if time.time() > lock_time + self.lock_timeout and not self.options.force: self.log.error('Consumer lagging too much, exiting') lock_db.rollback() sys.exit(1) def unlock_table_branch(self, lock_db, setup_db): # keep worker paused if it was so before if self.old_worker_paused: return setup_curs = setup_db.cursor() self.resume_consumer(setup_curs, self.provider_info['worker_name']) def process_sync(self, t1, t2, src_db, dst_db): """It gets 2 connections in state where tbl should be in same state. 
""" raise Exception('process_sync not implemented') def get_provider_location(self, dst_db): curs = dst_db.cursor() q = "select * from pgq_node.get_node_info(%s)" rows = self.exec_cmd(dst_db, q, [self.queue_name]) return (rows[0]['provider_node'], rows[0]['provider_location']) def pause_consumer(self, curs, cons_name): self.log.info("Pausing upstream worker: %s", cons_name) return self.set_pause_flag(curs, cons_name, True) def resume_consumer(self, curs, cons_name): self.log.info("Resuming upstream worker: %s", cons_name) return self.set_pause_flag(curs, cons_name, False) def set_pause_flag(self, curs, cons_name, flag): q = "select * from pgq_node.get_consumer_state(%s, %s)" res = self.exec_cmd(curs, q, [self.queue_name, cons_name]) oldflag = res[0]['paused'] q = "select * from pgq_node.set_consumer_paused(%s, %s, %s)" self.exec_cmd(curs, q, [self.queue_name, cons_name, flag]) while 1: q = "select * from pgq_node.get_consumer_state(%s, %s)" res = self.exec_cmd(curs, q, [self.queue_name, cons_name]) if res[0]['uptodate']: break time.sleep(0.5) return oldflag skytools-3.2.6/python/londiste/exec_attrs.py0000644000000000000000000002511112426435645016167 0ustar """Custom parser for EXECUTE attributes. The values are parsed from SQL file given to EXECUTE. Format rules: * Only lines starting with meta-comment prefix will be parsed: --*-- * Empty or regular SQL comment lines are ignored. * Parsing stops on first SQL statement. * Meta-line format: "--*-- Key: value1, value2" * If line ends with ',' then next line is taken as continuation. 
Supported keys: * Local-Table: * Local-Sequence: * Local-Destination: * Need-Table * Need-Sequence * Need-Function * Need-Schema * Need-View Sample file:: --*-- Local-Sequence: myseq --*-- --*-- Local-Table: table1, --*-- table2, table3 --*-- Tests: >>> a = ExecAttrs() >>> a.add_value("Local-Table", "mytable") >>> a.add_value("Local-Sequence", "seq1") >>> a.add_value("Local-Sequence", "seq2") >>> a.to_urlenc() 'local-table=mytable&local-sequence=seq1%2Cseq2' >>> a.add_value("Local-Destination", "mytable-longname-more1") >>> a.add_value("Local-Destination", "mytable-longname-more2") >>> a.add_value("Local-Destination", "mytable-longname-more3") >>> a.add_value("Local-Destination", "mytable-longname-more4") >>> a.add_value("Local-Destination", "mytable-longname-more5") >>> a.add_value("Local-Destination", "mytable-longname-more6") >>> a.add_value("Local-Destination", "mytable-longname-more7") >>> print a.to_sql() --*-- Local-Table: mytable --*-- Local-Sequence: seq1, seq2 --*-- Local-Destination: mytable-longname-more1, mytable-longname-more2, --*-- mytable-longname-more3, mytable-longname-more4, mytable-longname-more5, --*-- mytable-longname-more6, mytable-longname-more7 >>> a = ExecAttrs(sql = ''' ... ... -- ... ... --*-- Local-Table: foo , ... -- ... --*-- bar , ... --*-- ... --*-- zoo ... --*-- ... --*-- Local-Sequence: goo ... --*-- ... -- ... ... create fooza; ... ''') >>> print a.to_sql() --*-- Local-Table: foo, bar, zoo --*-- Local-Sequence: goo >>> seqs = {'public.goo': 'public.goo'} >>> tables = {} >>> tables['public.foo'] = 'public.foo' >>> tables['public.bar'] = 'other.Bar' >>> tables['public.zoo'] = 'Other.Foo' >>> a.need_execute(None, tables, seqs) True >>> a.need_execute(None, [], []) False >>> sql = '''alter table @foo@; ... alter table @bar@; ... 
META_PREFIX = "--*--"


class Matcher:
    """Base class for one EXECUTE attribute kind."""
    nice_name = None

    def match(self, objname, curs, tables, seqs):
        # subclasses decide whether objname satisfies this attribute
        pass

    def get_key(self):
        # urlencoded / attrs-dict key form of the nice name
        return self.nice_name.lower()

    def local_rename(self):
        # whether @name@ tags for this kind get rewritten to local names
        return False


class LocalTable(Matcher):
    """Matches when the table is locally subscribed."""
    nice_name = "Local-Table"

    def match(self, objname, curs, tables, seqs):
        return objname in tables

    def local_rename(self):
        return True


class LocalSequence(Matcher):
    """Matches when the sequence is locally subscribed."""
    nice_name = "Local-Sequence"

    def match(self, objname, curs, tables, seqs):
        return objname in seqs

    def local_rename(self):
        return True


class LocalDestination(Matcher):
    """Matches when the table is subscribed and its destination exists."""
    nice_name = "Local-Destination"

    def match(self, objname, curs, tables, seqs):
        if objname not in tables:
            return False
        dest_name = tables[objname]
        return skytools.exists_table(curs, dest_name)

    def local_rename(self):
        return True


class NeedTable(Matcher):
    """Matches when the table exists in the database."""
    nice_name = "Need-Table"

    def match(self, objname, curs, tables, seqs):
        return skytools.exists_table(curs, objname)


class NeedSequence(Matcher):
    """Matches when the sequence exists in the database."""
    nice_name = "Need-Sequence"

    def match(self, objname, curs, tables, seqs):
        return skytools.exists_sequence(curs, objname)


class NeedSchema(Matcher):
    """Matches when the schema exists in the database."""
    nice_name = "Need-Schema"

    def match(self, objname, curs, tables, seqs):
        return skytools.exists_schema(curs, objname)


class NeedFunction(Matcher):
    """Matches when the function exists; optional "(N)" suffix gives arg count."""
    nice_name = "Need-Function"

    def match(self, objname, curs, tables, seqs):
        nargs = 0
        pos1 = objname.find('(')
        if pos1 > 0:
            pos2 = objname.find(')')
            if pos2 > 0:
                nargs = int(objname[pos1 + 1 : pos2])
                objname = objname[:pos1]
        return skytools.exists_function(curs, objname, nargs)


class NeedView(Matcher):
    """Matches when the view exists in the database."""
    nice_name = "Need-View"

    def match(self, objname, curs, tables, seqs):
        return skytools.exists_view(curs, objname)


# line length at which to_sql() starts a continuation line
META_SPLITLINE = 70

# list of matches, in order they need to be probed
META_MATCHERS = [
    LocalTable(), LocalSequence(), LocalDestination(),
    NeedTable(), NeedSequence(), NeedFunction(),
    NeedSchema(), NeedView()
]
# key to nice key
META_KEYS = {}
for m in META_MATCHERS:
    k = m.nice_name.lower()
    META_KEYS[k] = m

class ExecAttrsException(skytools.UsageError):
    """Some parsing problem."""

class ExecAttrs:
    """Container and parser for EXECUTE attributes.

    Attributes are kept in self.attrs as {lowercased-key: [values]}.
    """
    def __init__(self, sql=None, urlenc=None):
        """Create container and parse either sql or urlenc string."""
        self.attrs = {}
        if sql and urlenc:
            raise Exception("Both sql and urlenc set.")
        if urlenc:
            self.parse_urlenc(urlenc)
        elif sql:
            self.parse_sql(sql)

    def add_value(self, k, v):
        """Add single value to key.

        Raises ExecAttrsException on unknown key.
        """
        xk = k.lower().strip()
        if xk not in META_KEYS:
            raise ExecAttrsException("Invalid key: %s" % k)
        if xk not in self.attrs:
            self.attrs[xk] = []
        xv = v.strip()
        self.attrs[xk].append(xv)

    def to_urlenc(self):
        """Convert container to urlencoded string."""
        sdict = {}
        for k, v in self.attrs.items():
            # values are joined with commas, split again on parse
            sdict[k] = ','.join(v)
        return skytools.db_urlencode(sdict)

    def parse_urlenc(self, ustr):
        """Parse urlencoded string adding values to current container."""
        sdict = skytools.db_urldecode(ustr)
        for k, v in sdict.items():
            for v1 in v.split(','):
                self.add_value(k, v1)

    def to_sql(self):
        """Convert container to SQL meta-comments.

        Long value lists are wrapped onto continuation lines once a line
        reaches META_SPLITLINE characters.
        """
        lines = []
        for m in META_MATCHERS:
            k = m.get_key()
            if k not in self.attrs:
                continue
            vlist = self.attrs[k]
            ln = "%s %s: " % (META_PREFIX, m.nice_name)
            start = 0
            for nr, v in enumerate(vlist):
                if nr > start:
                    ln = ln + ", " + v
                else:
                    ln = ln + v
                if len(ln) >= META_SPLITLINE and nr < len(vlist) - 1:
                    # wrap: close this line with a comma, start continuation
                    ln += ','
                    lines.append(ln)
                    ln = META_PREFIX + " "
                    start = nr + 1
            lines.append(ln)
        return '\n'.join(lines)

    def parse_sql(self, sql):
        """Parse SQL meta-comments.

        Only lines starting with META_PREFIX are examined; parsing stops
        at the first non-comment line.  Lines ending with ',' continue
        the previous key's value list.
        """
        cur_key = None
        cur_continued = False
        lineno = 1
        for nr, ln in enumerate(sql.splitlines()):
            lineno = nr+1

            # skip empty lines
            ln = ln.strip()
            if not ln:
                continue

            # stop at non-comment
            if ln[:2] != '--':
                break

            # parse only meta-comments
            if ln[:len(META_PREFIX)] != META_PREFIX:
                continue

            # cut prefix, skip empty comments
            ln = ln[len(META_PREFIX):].strip()
            if not ln:
                continue

            # continuation of previous key
            if cur_continued:
                # collect values
                for v in ln.split(','):
                    v = v.strip()
                    if not v:
                        continue
                    self.add_value(cur_key, v)

                # does this key continue?
                if ln[-1] != ',':
                    cur_key = None
                    cur_continued = False

                # go to next line
                continue

            # parse key
            pos = ln.find(':')
            if pos < 0:
                continue
            k = ln[:pos].strip()

            # collect values
            for v in ln[pos+1:].split(','):
                v = v.strip()
                if not v:
                    continue
                self.add_value(k, v)

            # check if current key values will continue
            if ln[-1] == ',':
                cur_key = k
                cur_continued = True
            else:
                cur_key = None
                cur_continued = False

    def need_execute(self, curs, local_tables, local_seqs):
        """Decide whether the script should run on this node.

        Returns True when all attribute values match the local setup,
        False when none do; raises when the match is partial.
        """
        # if no attrs, always execute
        if not self.attrs:
            return True

        matched = 0
        missed = 0
        good_list = []
        miss_list = []
        for m in META_MATCHERS:
            k = m.get_key()
            if k not in self.attrs:
                continue
            for v in self.attrs[k]:
                fqname = skytools.fq_name(v)
                if m.match(fqname, curs, local_tables, local_seqs):
                    matched += 1
                    good_list.append(v)
                else:
                    missed += 1
                    miss_list.append(v)

        # should be drop out early?
        if matched > 0 and missed == 0:
            return True
        elif missed > 0 and matched == 0:
            return False
        elif missed == 0 and matched == 0:
            # should not happen, but lets restore old behaviour?
            return True
        else:
            raise Exception("SQL only partially matches local setup: matches=%r misses=%r" % (good_list, miss_list))

    def get_attr(self, k):
        """Return value list for key (empty list when not set)."""
        k = k.lower().strip()
        if k not in META_KEYS:
            raise Exception("Bug: invalid key requested: " + k)
        if k not in self.attrs:
            return []
        return self.attrs[k]

    def process_sql(self, sql, local_tables, local_seqs):
        """Replace replacement tags in sql with actual local names."""
        for k, vlist in self.attrs.items():
            m = META_KEYS[k]
            if not m.local_rename():
                continue
            for v in vlist:
                repname = '@%s@' % v
                fqname = skytools.fq_name(v)
                if fqname in local_tables:
                    localname = local_tables[fqname]
                elif fqname in local_seqs:
                    localname = local_seqs[fqname]
                else:
                    # should not happen
                    raise Exception("bug: lost table: "+v)
                qdest = skytools.quote_fqident(localname)
                sql = sql.replace(repname, qdest)
        return sql
""" def process_sync(self, t1, t2, src_db, dst_db): """Actual comparison.""" src_tbl = t1.dest_table dst_tbl = t2.dest_table src_curs = src_db.cursor() dst_curs = dst_db.cursor() dst_where = t2.plugin.get_copy_condition(src_curs, dst_curs) src_where = dst_where self.log.info('Counting %s', dst_tbl) # get common cols cols = self.calc_cols(src_curs, src_tbl, dst_curs, dst_tbl) # get sane query v1 = src_db.server_version v2 = dst_db.server_version if self.options.count_only: q = "select count(1) as cnt from only _TABLE_" elif v1 < 80300 or v2 < 80300: # 8.2- does not have record to text and text to bit casts, so we need to use a bit of evil hackery q = "select count(1) as cnt, sum(bit_in(textout('x'||substr(md5(textin(record_out(_COLS_))),1,16)), 0, 64)::bigint) as chksum from only _TABLE_" elif (v1 < 80400 or v2 < 80400) and v1 != v2: # hashtext changed in 8.4 so we need to use md5 in case there is 8.3 vs 8.4+ comparison q = "select count(1) as cnt, sum(('x'||substr(md5(_COLS_::text),1,16))::bit(64)::bigint) as chksum from only _TABLE_" else: # this way is much faster than the above q = "select count(1) as cnt, sum(hashtext(_COLS_::text)::bigint) as chksum from only _TABLE_" q = self.cf.get('compare_sql', q) q = q.replace("_COLS_", cols) src_q = q.replace('_TABLE_', skytools.quote_fqident(src_tbl)) if src_where: src_q = src_q + " WHERE " + src_where dst_q = q.replace('_TABLE_', skytools.quote_fqident(dst_tbl)) if dst_where: dst_q = dst_q + " WHERE " + dst_where f = "%(cnt)d rows" if not self.options.count_only: f += ", checksum=%(chksum)s" f = self.cf.get('compare_fmt', f) self.log.debug("srcdb: %s", src_q) src_curs.execute(src_q) src_row = src_curs.fetchone() src_str = f % src_row self.log.info("srcdb: %s", src_str) src_db.commit() self.log.debug("dstdb: %s", dst_q) dst_curs.execute(dst_q) dst_row = dst_curs.fetchone() dst_str = f % dst_row self.log.info("dstdb: %s", dst_str) dst_db.commit() if src_str != dst_str: self.log.warning("%s: Results do not match!", 
dst_tbl) return 1 return 0 def calc_cols(self, src_curs, src_tbl, dst_curs, dst_tbl): cols1 = self.load_cols(src_curs, src_tbl) cols2 = self.load_cols(dst_curs, dst_tbl) qcols = [] for c in self.calc_common(cols1, cols2): qcols.append(skytools.quote_ident(c)) return "(%s)" % ",".join(qcols) def load_cols(self, curs, tbl): schema, table = skytools.fq_name_parts(tbl) q = "select column_name from information_schema.columns"\ " where table_schema = %s and table_name = %s" curs.execute(q, [schema, table]) cols = [] for row in curs.fetchall(): cols.append(row[0]) return cols def calc_common(self, cols1, cols2): common = [] map2 = {} for c in cols2: map2[c] = 1 for c in cols1: if c in map2: common.append(c) if len(common) == 0: raise Exception("no common columns found") if len(common) != len(cols1) or len(cols2) != len(cols1): self.log.warning("Ignoring some columns") return common def init_optparse(self, p=None): """Initialize cmdline switches.""" p = super(Comparator, self).init_optparse(p) p.add_option("--count-only", action="store_true", help="just count rows, do not compare data") return p if __name__ == '__main__': script = Comparator(sys.argv[1:]) script.start() skytools-3.2.6/python/londiste/repair.py0000644000000000000000000002441712426435645015320 0ustar """Repair data on subscriber. Walks tables by primary key and searches for missing inserts/updates/deletes. 
""" import sys, os, skytools, subprocess from londiste.syncer import Syncer __all__ = ['Repairer'] def unescape(s): """Remove copy escapes.""" return skytools.unescape_copy(s) class Repairer(Syncer): """Walks tables in primary key order and checks if data matches.""" cnt_insert = 0 cnt_update = 0 cnt_delete = 0 total_src = 0 total_dst = 0 pkey_list = [] common_fields = [] apply_curs = None def init_optparse(self, p=None): """Initialize cmdline switches.""" p = super(Repairer, self).init_optparse(p) p.add_option("--apply", action="store_true", help="apply fixes") return p def process_sync(self, t1, t2, src_db, dst_db): """Actual comparison.""" apply_db = None if self.options.apply: apply_db = self.get_database('db', cache='applydb', autocommit=1) self.apply_curs = apply_db.cursor() self.apply_curs.execute("set session_replication_role = 'replica'") src_tbl = t1.dest_table dst_tbl = t2.dest_table src_curs = src_db.cursor() dst_curs = dst_db.cursor() self.log.info('Checking %s', dst_tbl) self.common_fields = [] self.fq_common_fields = [] self.pkey_list = [] self.load_common_columns(src_tbl, dst_tbl, src_curs, dst_curs) dump_src = dst_tbl + ".src" dump_dst = dst_tbl + ".dst" dump_src_sorted = dump_src + ".sorted" dump_dst_sorted = dump_dst + ".sorted" dst_where = t2.plugin.get_copy_condition(src_curs, dst_curs) src_where = dst_where self.log.info("Dumping src table: %s", src_tbl) self.dump_table(src_tbl, src_curs, dump_src, src_where) src_db.commit() self.log.info("Dumping dst table: %s", dst_tbl) self.dump_table(dst_tbl, dst_curs, dump_dst, dst_where) dst_db.commit() self.log.info("Sorting src table: %s", dump_src) self.do_sort(dump_src, dump_src_sorted) self.log.info("Sorting dst table: %s", dump_dst) self.do_sort(dump_dst, dump_dst_sorted) self.dump_compare(dst_tbl, dump_src_sorted, dump_dst_sorted) os.unlink(dump_src) os.unlink(dump_dst) os.unlink(dump_src_sorted) os.unlink(dump_dst_sorted) def do_sort(self, src, dst): """ Sort contents of src file, write them to 
dst file. """ p = subprocess.Popen(["sort", "--version"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) s_ver = p.communicate()[0] del p xenv = os.environ.copy() xenv['LANG'] = 'C' xenv['LC_ALL'] = 'C' cmdline = ['sort', '-T', '.'] if s_ver.find("coreutils") > 0: cmdline.append('-S') cmdline.append('30%') cmdline.append('-o') cmdline.append(dst) cmdline.append(src) p = subprocess.Popen(cmdline, env = xenv) if p.wait() != 0: raise Exception('sort failed') def load_common_columns(self, src_tbl, dst_tbl, src_curs, dst_curs): """Get common fields, put pkeys in start.""" self.pkey_list = skytools.get_table_pkeys(src_curs, src_tbl) dst_pkey = skytools.get_table_pkeys(dst_curs, dst_tbl) if dst_pkey != self.pkey_list: self.log.error('pkeys do not match') sys.exit(1) src_cols = skytools.get_table_columns(src_curs, src_tbl) dst_cols = skytools.get_table_columns(dst_curs, dst_tbl) field_list = [] for f in self.pkey_list: field_list.append(f) for f in src_cols: if f in self.pkey_list: continue if f in dst_cols: field_list.append(f) self.common_fields = field_list fqlist = [skytools.quote_ident(col) for col in field_list] self.fq_common_fields = fqlist cols = ",".join(fqlist) self.log.debug("using columns: %s", cols) def dump_table(self, tbl, curs, fn, whr): """Dump table to disk.""" cols = ','.join(self.fq_common_fields) if len(whr) == 0: whr = 'true' q = "copy (SELECT %s FROM %s WHERE %s) to stdout" % (cols, skytools.quote_fqident(tbl), whr) self.log.debug("Query: %s", q) f = open(fn, "w", 64*1024) curs.copy_expert(q, f) size = f.tell() f.close() self.log.info('%s: Got %d bytes', tbl, size) def get_row(self, ln): """Parse a row into dict.""" if not ln: return None t = ln[:-1].split('\t') row = {} for i in range(len(self.common_fields)): row[self.common_fields[i]] = t[i] return row def dump_compare(self, tbl, src_fn, dst_fn): """ Compare two table dumps, create sql file to fix target table or apply changes to target table directly. 
""" self.log.info("Comparing dumps: %s", tbl) self.cnt_insert = 0 self.cnt_update = 0 self.cnt_delete = 0 self.total_src = 0 self.total_dst = 0 f1 = open(src_fn, "r", 64*1024) f2 = open(dst_fn, "r", 64*1024) src_ln = f1.readline() dst_ln = f2.readline() if src_ln: self.total_src += 1 if dst_ln: self.total_dst += 1 fix = "fix.%s.sql" % tbl if os.path.isfile(fix): os.unlink(fix) while src_ln or dst_ln: keep_src = keep_dst = 0 if src_ln != dst_ln: src_row = self.get_row(src_ln) dst_row = self.get_row(dst_ln) diff = self.cmp_keys(src_row, dst_row) if diff > 0: # src > dst self.got_missed_delete(tbl, dst_row) keep_src = 1 elif diff < 0: # src < dst self.got_missed_insert(tbl, src_row) keep_dst = 1 else: if self.cmp_data(src_row, dst_row) != 0: self.got_missed_update(tbl, src_row, dst_row) if not keep_src: src_ln = f1.readline() if src_ln: self.total_src += 1 if not keep_dst: dst_ln = f2.readline() if dst_ln: self.total_dst += 1 self.log.info("finished %s: src: %d rows, dst: %d rows," " missed: %d inserts, %d updates, %d deletes", tbl, self.total_src, self.total_dst, self.cnt_insert, self.cnt_update, self.cnt_delete) def got_missed_insert(self, tbl, src_row): """Create sql for missed insert.""" self.cnt_insert += 1 fld_list = self.common_fields fq_list = [] val_list = [] for f in fld_list: fq_list.append(skytools.quote_ident(f)) v = unescape(src_row[f]) val_list.append(skytools.quote_literal(v)) q = "insert into %s (%s) values (%s);" % ( tbl, ", ".join(fq_list), ", ".join(val_list)) self.show_fix(tbl, q, 'insert') def got_missed_update(self, tbl, src_row, dst_row): """Create sql for missed update.""" self.cnt_update += 1 fld_list = self.common_fields set_list = [] whe_list = [] for f in self.pkey_list: self.addcmp(whe_list, skytools.quote_ident(f), unescape(src_row[f])) for f in fld_list: v1 = src_row[f] v2 = dst_row[f] if self.cmp_value(v1, v2) == 0: continue self.addeq(set_list, skytools.quote_ident(f), unescape(v1)) self.addcmp(whe_list, skytools.quote_ident(f), 
    def got_missed_delete(self, tbl, dst_row):
        """Create sql for missed delete."""
        self.cnt_delete += 1
        whe_list = []
        for f in self.pkey_list:
            self.addcmp(whe_list, skytools.quote_ident(f), unescape(dst_row[f]))
        q = "delete from only %s where %s;" % (skytools.quote_fqident(tbl), " and ".join(whe_list))
        self.show_fix(tbl, q, 'delete')

    def show_fix(self, tbl, q, desc):
        """Print/write/apply repair sql.

        With --apply executes directly, otherwise appends to fix.<tbl>.sql.
        """
        self.log.debug("missed %s: %s", desc, q)
        if self.apply_curs:
            self.apply_curs.execute(q)
        else:
            fn = "fix.%s.sql" % tbl
            open(fn, "a").write("%s\n" % q)

    def addeq(self, list, f, v):
        """Add quoted SET."""
        vq = skytools.quote_literal(v)
        s = "%s = %s" % (f, vq)
        list.append(s)

    def addcmp(self, list, f, v):
        """Add quoted comparison.

        NULLs need "is null" instead of "= ...".
        """
        if v is None:
            s = "%s is null" % f
        else:
            vq = skytools.quote_literal(v)
            s = "%s = %s" % (f, vq)
        list.append(s)

    def cmp_data(self, src_row, dst_row):
        """Compare data field-by-field.

        Returns 0 when equal, -1 on first difference.
        """
        for k in self.common_fields:
            v1 = src_row[k]
            v2 = dst_row[k]
            if self.cmp_value(v1, v2) != 0:
                return -1
        return 0

    def cmp_value(self, v1, v2):
        """Compare single field, tolerates tz vs notz dates."""
        if v1 == v2:
            return 0

        # try to work around tz vs. notz: strip a trailing "+TZ" suffix
        # from the longer value and compare again
        z1 = len(v1)
        z2 = len(v2)
        if z1 == z2 + 3 and z2 >= 19 and v1[z2] == '+':
            v1 = v1[:-3]
            if v1 == v2:
                return 0
        elif z1 + 3 == z2 and z1 >= 19 and v2[z1] == '+':
            v2 = v2[:-3]
            if v1 == v2:
                return 0

        return -1
def get_copy_suffix(self, tblname):
    """Suffix appended to the pidfile and consumer name for this copy job."""
    return ".copy." + tblname
tbl_stat.name) dst_db.commit() while 1: pmap = self.get_state_map(src_db.cursor()) src_db.commit() if tbl_stat.name not in pmap: raise Exception("table %s not available on provider" % tbl_stat.name) pt = pmap[tbl_stat.name] if pt.state == TABLE_OK: break self.log.warning("table %s not in sync yet on provider, waiting", tbl_stat.name) time.sleep(10) src_real_table = pt.dest_table # 0 - dont touch # 1 - single tx # 2 - multi tx cmode = 1 if tbl_stat.copy_role == 'lead': cmode = 2 elif tbl_stat.copy_role: cmode = 0 # We need to see COPY snapshot from txid_current_snapshot() later. oldiso = src_db.isolation_level src_db.set_isolation_level(skytools.I_REPEATABLE_READ) src_db.commit() self.sync_database_encodings(src_db, dst_db) self.log.info("Starting full copy of %s", tbl_stat.name) # just in case, drop all fkeys (in case "replay" was skipped) # !! this may commit, so must be done before anything else !! if cmode > 0: self.drop_fkeys(dst_db, tbl_stat.dest_table) # now start ddl-dropping tx if cmode > 0: q = "lock table " + skytools.quote_fqident(tbl_stat.dest_table) dst_curs.execute(q) # find dst struct src_struct = TableStruct(src_curs, src_real_table) dst_struct = TableStruct(dst_curs, tbl_stat.dest_table) # take common columns, warn on missing ones dlist = dst_struct.get_column_list() slist = src_struct.get_column_list() common_cols = [] for c in slist: if c not in dlist: self.log.warning("Table %s column %s does not exist on subscriber", tbl_stat.name, c) else: common_cols.append(c) for c in dlist: if c not in slist: self.log.warning("Table %s column %s does not exist on provider", tbl_stat.name, c) # drop unnecessary stuff if cmode > 0: objs = T_CONSTRAINT | T_INDEX | T_RULE | T_PARENT # | T_TRIGGER dst_struct.drop(dst_curs, objs, log = self.log) # drop data if tbl_stat.table_attrs.get('skip_truncate'): self.log.info("%s: skipping truncate", tbl_stat.name) else: self.log.info("%s: truncating", tbl_stat.name) q = "truncate " if dst_db.server_version >= 80400: q += 
"only " q += skytools.quote_fqident(tbl_stat.dest_table) dst_curs.execute(q) if cmode == 2 and tbl_stat.dropped_ddl is None: ddl = dst_struct.get_create_sql(objs) if ddl: q = "select * from londiste.local_set_table_struct(%s, %s, %s)" self.exec_cmd(dst_curs, q, [self.queue_name, tbl_stat.name, ddl]) else: ddl = None dst_db.commit() tbl_stat.dropped_ddl = ddl # do truncate & copy self.log.info("%s: start copy", tbl_stat.name) p = tbl_stat.get_plugin() stats = p.real_copy(src_real_table, src_curs, dst_curs, common_cols) if stats: self.log.info("%s: copy finished: %d bytes, %d rows", tbl_stat.name, stats[0], stats[1]) # get snapshot src_curs.execute("select txid_current_snapshot()") snapshot = src_curs.fetchone()[0] src_db.commit() # restore old behaviour src_db.set_isolation_level(oldiso) src_db.commit() tbl_stat.change_state(TABLE_CATCHING_UP) tbl_stat.change_snapshot(snapshot) self.save_table_state(dst_curs) # create previously dropped objects if cmode == 1: dst_struct.create(dst_curs, objs, log = self.log) elif cmode == 2: dst_db.commit() # start waiting for other copy processes to finish while tbl_stat.copy_role: self.log.info('waiting for other partitions to finish copy') time.sleep(10) tbl_stat = self.reload_table_stat(dst_curs, tbl_stat.name) dst_db.commit() if tbl_stat.dropped_ddl is not None: self.looping = 0 for ddl in skytools.parse_statements(tbl_stat.dropped_ddl): self.log.info(ddl) dst_curs.execute(ddl) q = "select * from londiste.local_set_table_struct(%s, %s, NULL)" self.exec_cmd(dst_curs, q, [self.queue_name, tbl_stat.name]) tbl_stat.dropped_ddl = None self.looping = 1 dst_db.commit() # hack for copy-in-playback if not self.copy_thread: tbl_stat.change_state(TABLE_OK) self.save_table_state(dst_curs) dst_db.commit() # copy finished if tbl_stat.copy_role == 'wait-replay': return # if copy done, request immediate tick from pgqd, # to make state juggling faster. on mostly idle db-s # each step may take tickers idle_timeout secs, which is pain. 
q = "select pgq.force_tick(%s)" src_curs.execute(q, [self.queue_name]) src_db.commit() def work(self): if not self.reg_ok: # check if needed? (table, not existing reg) self.register_copy_consumer() self.reg_ok = True return Replicator.work(self) def register_copy_consumer(self): dst_db = self.get_database('db') dst_curs = dst_db.cursor() # fetch table attrs q = "select * from londiste.get_table_list(%s) where table_name = %s" dst_curs.execute(q, [ self.queue_name, self.copy_table_name ]) rows = dst_curs.fetchall() attrs = {} if len(rows) > 0: v_attrs = rows[0]['table_attrs'] if v_attrs: attrs = skytools.db_urldecode(v_attrs) # fetch parent consumer state q = "select * from pgq_node.get_consumer_state(%s, %s)" rows = self.exec_cmd(dst_db, q, [ self.queue_name, self.old_consumer_name ]) state = rows[0] source_node = state['provider_node'] source_location = state['provider_location'] # do we have node here? if 'copy_node' in attrs: if attrs['copy_node'] == '?': source_node, source_location, wname = find_copy_source(self, self.queue_name, self.copy_table_name, source_node, source_location) else: # take node from attrs source_node = attrs['copy_node'] q = "select * from pgq_node.get_queue_locations(%s) where node_name = %s" dst_curs.execute(q, [ self.queue_name, source_node ]) rows = dst_curs.fetchall() if len(rows): source_location = rows[0]['node_location'] self.log.info("Using '%s' as source node", source_node) self.register_consumer(source_location) if __name__ == '__main__': script = CopyTable(sys.argv[1:]) script.start() skytools-3.2.6/python/londiste/util.py0000644000000000000000000000536512426435645015014 0ustar import skytools import londiste.handler __all__ = ['handler_allows_copy', 'find_copy_source'] def handler_allows_copy(table_attrs): """Decide if table is copyable based on attrs.""" if not table_attrs: return True attrs = skytools.db_urldecode(table_attrs) hstr = attrs.get('handler', '') p = londiste.handler.build_handler('unused.string', hstr, None) 
def find_copy_source(script, queue_name, copy_table_name, node_name, node_location):
    """Find source node for table.

    @param script: DbScript
    @param queue_name: name of the cascaded queue
    @param copy_table_name: name of the table (or list of names)
    @param node_name: target node name
    @param node_location: target node location
    @returns (node_name, node_location, downstream_worker_name) of source node
    """

    # None means no steps upwards were taken, so local consumer is worker
    worker_name = None

    # normalize to a set of table names that must all be available
    if isinstance(copy_table_name, str):
        need = set([copy_table_name])
    else:
        need = set(copy_table_name)

    # walk the cascade from the given node towards the root until a
    # node is found that has every needed table locally available
    while 1:
        src_db = script.get_database('_source_db', connstr = node_location, autocommit = 1, profile = 'remote')
        src_curs = src_db.cursor()

        q = "select * from pgq_node.get_node_info(%s)"
        src_curs.execute(q, [queue_name])
        info = src_curs.fetchone()
        # pgq_node functions signal errors via ret_code >= 400
        if info['ret_code'] >= 400:
            raise skytools.UsageError("Node does not exist")

        script.log.info("Checking if %s can be used for copy", info['node_name'])

        q = "select table_name, local, table_attrs from londiste.get_table_list(%s)"
        src_curs.execute(q, [queue_name])
        got = set()
        for row in src_curs.fetchall():
            tbl = row['table_name']
            if tbl not in need:
                continue
            # table must be locally replicated on the candidate node
            if not row['local']:
                script.log.debug("Problem: %s is not local", tbl)
                continue
            # table's handler must actually store rows (e.g. not a queue-only handler)
            if not handler_allows_copy(row['table_attrs']):
                script.log.debug("Problem: %s handler does not store data [%s]", tbl, row['table_attrs'])
                continue
            script.log.debug("Good: %s is usable", tbl)
            got.add(tbl)

        # drop the connection before either returning or reconnecting upstream
        script.close_database('_source_db')

        if got == need:
            script.log.info("Node %s seems good source, using it", info['node_name'])
            return node_name, node_location, worker_name
        else:
            script.log.info("Node %s does not have all tables", info['node_name'])

        # root has no provider to fall back to
        if info['node_type'] == 'root':
            raise skytools.UsageError("Found root and no source found")

        # walk upwards
        node_name = info['provider_node']
        node_location = info['provider_location']
        worker_name = info['worker_name']
def install_code(self, db):
    """Register londiste schema objects, then let CascadeAdmin install them."""
    schema_obj = skytools.DBSchema("londiste", sql_file = 'londiste.sql')
    upgrade_fn = skytools.DBFunction("londiste.global_add_table", 2,
                                     sql_file = 'londiste.upgrade_2.1_to_3.1.sql')
    self.extra_objs = [schema_obj, upgrade_fn]
    CascadeAdmin.install_code(self, db)
help="include all tables", default=False) p.add_option("--wait-sync", action="store_true", help = "add: wait until all tables are in sync"), p.add_option("--create", action="store_true", help="create, minimal", default=False) p.add_option("--create-full", action="store_true", help="create, full") p.add_option("--trigger-flags", help="set trigger flags (BAIUDLQ)") p.add_option("--trigger-arg", action="append", help="custom trigger arg") p.add_option("--no-triggers", action="store_true", help="no triggers on table") p.add_option("--handler", action="store", help="add: custom handler for table") p.add_option("--handler-arg", action="append", help="add: argument to custom handler") p.add_option("--merge-all", action="store_true", help="merge tables from all source queues", default=False) p.add_option("--no-merge", action="store_true", help="do not merge tables from source queues", default=False) p.add_option("--max-parallel-copy", metavar = "NUM", type = "int", help="max number of parallel copy processes") p.add_option("--dest-table", metavar = "NAME", help="add: name for actual table") p.add_option("--skip-non-existing", action="store_true", help="add: skip object that does not exist") return p def extra_init(self, node_type, node_db, provider_db): """Callback from CascadeAdmin init.""" if not provider_db: return pcurs = provider_db.cursor() ncurs = node_db.cursor() # sync tables q = "select table_name from londiste.get_table_list(%s)" pcurs.execute(q, [self.set_name]) for row in pcurs.fetchall(): tbl = row['table_name'] q = "select * from londiste.global_add_table(%s, %s)" ncurs.execute(q, [self.set_name, tbl]) # sync seqs q = "select seq_name, last_value from londiste.get_seq_list(%s)" pcurs.execute(q, [self.set_name]) for row in pcurs.fetchall(): seq = row['seq_name'] val = row['last_value'] q = "select * from londiste.global_update_seq(%s, %s, %s)" ncurs.execute(q, [self.set_name, seq, val]) # done node_db.commit() provider_db.commit() def is_root(self): return 
def set_lock_timeout(self, curs):
    """Apply configured lock_timeout (seconds) as a LOCAL statement_timeout (ms).

    A non-positive timeout disables the limit (nothing is executed).
    """
    ms = int(1000 * self.lock_timeout)
    if ms <= 0:
        return
    q = "SET LOCAL statement_timeout = %d" % ms
    self.log.debug(q)
    curs.execute(q)
self.log.error("--dest-table can be given only for single table") sys.exit(1) # seems ok for tbl in args: self.add_table(src_db, dst_db, tbl, create_flags, src_tbls) # wait if self.options.wait_sync: self.wait_for_sync(dst_db) def add_table(self, src_db, dst_db, tbl, create_flags, src_tbls): # use full names tbl = skytools.fq_name(tbl) dest_table = self.options.dest_table or tbl dest_table = skytools.fq_name(dest_table) src_curs = src_db.cursor() dst_curs = dst_db.cursor() tbl_exists = skytools.exists_table(dst_curs, dest_table) dst_db.commit() self.set_lock_timeout(dst_curs) if dest_table == tbl: desc = tbl else: desc = "%s(%s)" % (tbl, dest_table) if create_flags: if tbl_exists: self.log.info('Table %s already exist, not touching', desc) else: src_dest_table = src_tbls[tbl]['dest_table'] if not skytools.exists_table(src_curs, src_dest_table): # table not present on provider - nowhere to get the DDL from self.log.warning('Table %s missing on provider, cannot create, skipping', desc) return schema = skytools.fq_name_parts(dest_table)[0] if not skytools.exists_schema(dst_curs, schema): q = "create schema %s" % skytools.quote_ident(schema) dst_curs.execute(q) s = skytools.TableStruct(src_curs, src_dest_table) src_db.commit() # create, using rename logic only when necessary newname = None if src_dest_table != dest_table: newname = dest_table s.create(dst_curs, create_flags, log = self.log, new_table_name = newname) elif not tbl_exists and self.options.skip_non_existing: self.log.warning('Table %s does not exist on local node, skipping', desc) return tgargs = self.build_tgargs() attrs = {} if self.options.handler: attrs['handler'] = self.build_handler(tbl, tgargs, self.options.dest_table) if self.options.find_copy_node: attrs['copy_node'] = '?' 
def build_tgargs(self):
    """Build trigger argument list from command-line options.

    Returns a fresh list.  Previously the list from
    self.options.trigger_arg was aliased and then mutated, so repeated
    calls (e.g. add-table followed by change-handler) kept appending
    duplicate flags to the user's option object; now it is copied.
    """
    if self.options.trigger_arg:
        tgargs = list(self.options.trigger_arg)
    else:
        tgargs = []
    tgflags = self.options.trigger_flags
    if tgflags:
        tgargs.append('tgflags=' + tgflags)
    if self.options.no_triggers:
        tgargs.append('no_triggers')
    if self.options.merge_all:
        tgargs.append('merge_all')
    if self.options.no_merge:
        tgargs.append('no_merge')
    if self.options.expect_sync:
        tgargs.append('expect_sync')
    return tgargs
subscriber, removing") self.exec_cmd(dst_curs, q, [self.set_name, tbl]) del dst_tbls[tbl] def fetch_set_tables(self, curs): q = "select table_name, local, "\ " coalesce(dest_table, table_name) as dest_table "\ " from londiste.get_table_list(%s)" curs.execute(q, [self.set_name]) res = {} for row in curs.fetchall(): res[row[0]] = row return res def cmd_remove_table(self, *args): """Detach table(s) from local node.""" db = self.get_database('db') args = self.expand_arg_list(db, 'r', True, args) q = "select * from londiste.local_remove_table(%s, %s)" self.exec_cmd_many(db, q, [self.set_name], args) def cmd_change_handler(self, tbl): """Change handler (table_attrs) of the replicated table.""" self.load_local_info() tbl = skytools.fq_name(tbl) db = self.get_database('db') curs = db.cursor() q = "select table_attrs, dest_table "\ " from londiste.get_table_list(%s) "\ " where table_name = %s and local" curs.execute(q, [self.set_name, tbl]) if curs.rowcount == 0: self.log.error("Table %s not found on this node", tbl) sys.exit(1) attrs, dest_table = curs.fetchone() attrs = skytools.db_urldecode(attrs or '') old_handler = attrs.get('handler') tgargs = self.build_tgargs() if self.options.handler: new_handler = self.build_handler(tbl, tgargs, dest_table) else: new_handler = None if old_handler == new_handler: self.log.info("Handler is already set to desired value, nothing done") sys.exit(0) if new_handler: attrs['handler'] = new_handler elif 'handler' in attrs: del attrs['handler'] args = [self.set_name, tbl, tgargs, None] if attrs: args[3] = skytools.db_urlencode(attrs) q = "select * from londiste.local_change_handler(%s, %s, %s, %s)" self.exec_cmd(curs, q, args) db.commit() def cmd_add_seq(self, *args): """Attach seqs(s) to local node.""" dst_db = self.get_database('db') dst_curs = dst_db.cursor() src_db = self.get_provider_db() src_curs = src_db.cursor() src_seqs = self.fetch_seqs(src_curs) dst_seqs = self.fetch_seqs(dst_curs) src_db.commit() self.sync_seq_list(dst_curs, 
def fetch_seqs(self, curs):
    """Load sequence info for this set, keyed by sequence name."""
    sql = "select seq_name, last_value, local from londiste.get_seq_list(%s)"
    curs.execute(sql, [self.set_name])
    return dict((row[0], row) for row in curs.fetchall())
removing") self.exec_cmd(dst_curs, q, [self.set_name, seq]) del dst_seqs[seq] def cmd_remove_seq(self, *args): """Detach seqs(s) from local node.""" q = "select * from londiste.local_remove_seq(%s, %s)" db = self.get_database('db') args = self.expand_arg_list(db, 'S', True, args) self.exec_cmd_many(db, q, [self.set_name], args) def cmd_resync(self, *args): """Reload data from provider node.""" db = self.get_database('db') args = self.expand_arg_list(db, 'r', True, args) if not self.options.find_copy_node: self.load_local_info() src_db = self.get_provider_db() src_curs = src_db.cursor() src_tbls = self.fetch_set_tables(src_curs) src_db.commit() problems = 0 for tbl in args: tbl = skytools.fq_name(tbl) if tbl not in src_tbls or not src_tbls[tbl]['local']: self.log.error("Table %s does not exist on provider, need to switch to different provider", tbl) problems += 1 if problems > 0: self.log.error("Problems, cancelling operation") sys.exit(1) if self.options.find_copy_node or self.options.copy_node: q = "select table_name, table_attrs from londiste.get_table_list(%s) where local" cur = db.cursor() cur.execute(q, [self.set_name]) for row in cur.fetchall(): if row['table_name'] not in args: continue attrs = skytools.db_urldecode (row['table_attrs'] or '') if self.options.find_copy_node: attrs['copy_node'] = '?' 
elif self.options.copy_node: attrs['copy_node'] = self.options.copy_node attrs = skytools.db_urlencode (attrs) q = "select * from londiste.local_set_table_attrs (%s, %s, %s)" self.exec_cmd(db, q, [self.set_name, row['table_name'], attrs]) q = "select * from londiste.local_set_table_state(%s, %s, null, null)" self.exec_cmd_many(db, q, [self.set_name], args) def cmd_tables(self): """Show attached tables.""" q = """select table_name, merge_state, table_attrs from londiste.get_table_list(%s) where local order by table_name""" db = self.get_database('db') def show_attr(a): if a: return skytools.db_urldecode(a) return '' self.display_table(db, "Tables on node", q, [self.set_name], fieldfmt = {'table_attrs': show_attr}) def cmd_seqs(self): """Show attached seqs.""" q = "select seq_name, local, last_value from londiste.get_seq_list(%s)" db = self.get_database('db') self.display_table(db, "Sequences on node", q, [self.set_name]) def cmd_missing(self): """Show missing tables on local node.""" q = "select * from londiste.local_show_missing(%s)" db = self.get_database('db') self.display_table(db, "Missing objects on node", q, [self.set_name]) def cmd_check(self): """TODO: check if structs match""" pass def cmd_fkeys(self): """TODO: show removed fkeys.""" pass def cmd_triggers(self): """TODO: show removed triggers.""" pass def cmd_show_handlers(self, *args): """Show help about handlers.""" londiste.handler.show(args) def cmd_execute(self, *files): db = self.get_database('db') curs = db.cursor() tables = self.fetch_set_tables(curs) seqs = self.fetch_seqs(curs) # generate local maps local_tables = {} local_seqs = {} for tbl in tables.values(): if tbl['local']: local_tables[tbl['table_name']] = tbl['dest_table'] for seq in seqs.values(): if seq['local']: local_seqs[seq['seq_name']] = seq['seq_name'] # set replica role for EXECUTE transaction if db.server_version >= 80300: curs.execute("set local session_replication_role = 'local'") for fn in files: fname = os.path.basename(fn) sql 
= open(fn, "r").read() attrs = ExecAttrs(sql = sql) q = "select * from londiste.execute_start(%s, %s, %s, true, %s)" res = self.exec_cmd(db, q, [self.queue_name, fname, sql, attrs.to_urlenc()], commit = False) ret = res[0]['ret_code'] if ret > 200: self.log.warning("Skipping execution of '%s'", fname) continue if attrs.need_execute(curs, local_tables, local_seqs): self.log.info("%s: executing sql", fname) xsql = attrs.process_sql(sql, local_tables, local_seqs) for stmt in skytools.parse_statements(xsql): curs.execute(stmt) else: self.log.info("%s: This SQL does not need to run on this node.", fname) q = "select * from londiste.execute_finish(%s, %s)" self.exec_cmd(db, q, [self.queue_name, fname], commit = False) db.commit() def get_provider_db(self): if self.options.copy_node: # use custom node for copy source_node = self.options.copy_node m = self.queue_info.get_member(source_node) if not m: raise skytools.UsageError("Cannot find node <%s>", source_node) if source_node == self.local_node: raise skytools.UsageError("Cannot use itself as provider") self.provider_location = m.location if not self.provider_location: db = self.get_database('db') q = 'select * from pgq_node.get_node_info(%s)' res = self.exec_cmd(db, q, [self.queue_name], quiet = True) self.provider_location = res[0]['provider_location'] return self.get_database('provider_db', connstr = self.provider_location, profile = 'remote') def expand_arg_list(self, db, kind, existing, args, needs_tbl=True): curs = db.cursor() if kind == 'S': q1 = "select seq_name, local from londiste.get_seq_list(%s) where local" elif kind == 'r': q1 = "select table_name, local from londiste.get_table_list(%s) where local" else: raise Exception("bug") q2 = "select obj_name from londiste.local_show_missing(%%s) where obj_kind = '%s'" % kind lst_exists = [] map_exists = {} curs.execute(q1, [self.set_name]) for row in curs.fetchall(): lst_exists.append(row[0]) map_exists[row[0]] = 1 lst_missing = [] map_missing = {} curs.execute(q2, 
def solve_globbing(self, args, full_list, full_map, reverse_map, allow_nonexist):
    """Expand glob patterns and plain names against the known object lists.

    Glob args ('*'/'?') are matched against full_list; plain names are
    fully-qualified and then accepted, reported as already processed,
    allowed through (allow_nonexist / --force) or refused.  Raises
    UsageError when any name is unavailable and not forced.
    """
    def glob2regex(pat):
        # '.' stays literal, '?' matches one char, '*' matches a run
        return '^%s$' % pat.replace('.', '[.]').replace('?', '.').replace('*', '.*')

    picked = {}
    result = []
    failed = False
    for a in args:
        if a.find('*') >= 0 or a.find('?') >= 0:
            # bare glob defaults to the public schema
            if a.find('.') < 0:
                a = 'public.' + a
            rc = re.compile(glob2regex(a))
            for candidate in full_list:
                if rc.match(candidate) and candidate not in picked:
                    picked[candidate] = 1
                    result.append(candidate)
        else:
            name = skytools.fq_name(a)
            if name in picked:
                pass
            elif name in full_map:
                result.append(name)
                picked[name] = 1
            elif name in reverse_map:
                self.log.info("%s already processed", name)
            elif allow_nonexist:
                result.append(name)
                picked[name] = 1
            elif self.options.force:
                self.log.warning("%s not available, but --force is used", name)
                result.append(name)
                picked[name] = 1
            else:
                self.log.warning("%s not available", name)
                failed = True
    if failed:
        raise skytools.UsageError("Cannot proceed")
    return result
CascadeAdmin.load_extra_status(self, curs, node) curs.execute("select * from londiste.get_table_list(%s)", [self.queue_name]) n_ok = n_half = n_ign = 0 for tbl in curs.fetchall(): if not tbl['local']: n_ign += 1 elif tbl['merge_state'] == 'ok': n_ok += 1 else: n_half += 1 node.add_info_line('Tables: %d/%d/%d' % (n_ok, n_half, n_ign)) def cmd_wait_sync(self): self.load_local_info() dst_db = self.get_database('db') self.wait_for_sync(dst_db) def wait_for_sync(self, dst_db): self.log.info("Waiting until all tables are in sync") q = "select table_name, merge_state, local"\ " from londiste.get_table_list(%s) where local" dst_curs = dst_db.cursor() partial = {} startup_info = 0 while 1: dst_curs.execute(q, [self.queue_name]) rows = dst_curs.fetchall() dst_db.commit() total_count = 0 cur_count = 0 done_list = [] for row in rows: if not row['local']: continue total_count += 1 tbl = row['table_name'] if row['merge_state'] != 'ok': partial[tbl] = 0 cur_count += 1 elif tbl in partial: if partial[tbl] == 0: partial[tbl] = 1 done_list.append(tbl) done_count = total_count - cur_count if not startup_info: self.log.info("%d/%d table(s) to copy", cur_count, total_count) startup_info = 1 for done in done_list: self.log.info("%s: finished (%d/%d)", done, done_count, total_count) if cur_count == 0: break self.sleep(2) self.log.info("All done") def resurrect_dump_event(self, ev, stats, batch_info): """Collect per-table stats.""" super(LondisteSetup, self).resurrect_dump_event(ev, stats, batch_info) ROLLBACK = 'can rollback' NO_ROLLBACK = 'cannot rollback' if ev.ev_type == 'TRUNCATE': if 'truncated_tables' not in stats: stats['truncated_tables'] = [] tlist = stats['truncated_tables'] tbl = ev.ev_extra1 if tbl not in tlist: tlist.append(tbl) elif ev.ev_type[:2] in ('I:', 'U:', 'D:', 'I', 'U', 'D'): op = ev.ev_type[0] tbl = ev.ev_extra1 bak = ev.ev_extra3 tblkey = 'table: %s' % tbl if tblkey not in stats: stats[tblkey] = [0,0,0,ROLLBACK] tinfo = stats[tblkey] if op == 'I': tinfo[0] += 1 
elif op == 'U': tinfo[1] += 1 if not bak: tinfo[3] = NO_ROLLBACK elif op == 'D': tinfo[2] += 1 if not bak and ev.ev_type == 'D': tinfo[3] = NO_ROLLBACK skytools-3.2.6/python/londiste/playback.py0000644000000000000000000010305312426435645015616 0ustar #! /usr/bin/env python """Basic replication core.""" import sys, os, time import skytools from pgq.cascade.worker import CascadedWorker from londiste.handler import * from londiste.exec_attrs import ExecAttrs __all__ = ['Replicator', 'TableState', 'TABLE_MISSING', 'TABLE_IN_COPY', 'TABLE_CATCHING_UP', 'TABLE_WANNA_SYNC', 'TABLE_DO_SYNC', 'TABLE_OK'] # state # owner - who is allowed to change TABLE_MISSING = 0 # main TABLE_IN_COPY = 1 # copy TABLE_CATCHING_UP = 2 # copy TABLE_WANNA_SYNC = 3 # main TABLE_DO_SYNC = 4 # copy TABLE_OK = 5 # setup SYNC_OK = 0 # continue with batch SYNC_LOOP = 1 # sleep, try again SYNC_EXIT = 2 # nothing to do, exit script MAX_PARALLEL_COPY = 8 # default number of allowed max parallel copy processes class Counter(object): """Counts table statuses.""" missing = 0 copy = 0 catching_up = 0 wanna_sync = 0 do_sync = 0 ok = 0 def __init__(self, tables): """Counts and sanity checks.""" for t in tables: if t.state == TABLE_MISSING: self.missing += 1 elif t.state == TABLE_IN_COPY: self.copy += 1 elif t.state == TABLE_CATCHING_UP: self.catching_up += 1 elif t.state == TABLE_WANNA_SYNC: self.wanna_sync += 1 elif t.state == TABLE_DO_SYNC: self.do_sync += 1 elif t.state == TABLE_OK: self.ok += 1 def get_copy_count(self): return self.copy + self.catching_up + self.wanna_sync + self.do_sync class TableState(object): """Keeps state about one table.""" def __init__(self, name, log): """Init TableState for one table.""" self.name = name self.dest_table = name self.log = log # same as forget: self.state = TABLE_MISSING self.last_snapshot_tick = None self.str_snapshot = None self.from_snapshot = None self.sync_tick_id = None self.ok_batch_count = 0 self.last_tick = 0 self.table_attrs = {} self.copy_role = None 
self.dropped_ddl = None self.plugin = None # except this self.changed = 0 # position in parallel copy work order self.copy_pos = 0 # max number of parallel copy processes allowed self.max_parallel_copy = MAX_PARALLEL_COPY def forget(self): """Reset all info.""" self.state = TABLE_MISSING self.last_snapshot_tick = None self.str_snapshot = None self.from_snapshot = None self.sync_tick_id = None self.ok_batch_count = 0 self.last_tick = 0 self.table_attrs = {} self.changed = 1 self.plugin = None self.copy_pos = 0 self.max_parallel_copy = MAX_PARALLEL_COPY def change_snapshot(self, str_snapshot, tag_changed = 1): """Set snapshot.""" if self.str_snapshot == str_snapshot: return self.log.debug("%s: change_snapshot to %s", self.name, str_snapshot) self.str_snapshot = str_snapshot if str_snapshot: self.from_snapshot = skytools.Snapshot(str_snapshot) else: self.from_snapshot = None if tag_changed: self.ok_batch_count = 0 self.last_tick = None self.changed = 1 def change_state(self, state, tick_id = None): """Set state.""" if self.state == state and self.sync_tick_id == tick_id: return self.state = state self.sync_tick_id = tick_id self.changed = 1 self.log.debug("%s: change_state to %s", self.name, self.render_state()) def render_state(self): """Make a string to be stored in db.""" if self.state == TABLE_MISSING: return None elif self.state == TABLE_IN_COPY: return 'in-copy' elif self.state == TABLE_CATCHING_UP: return 'catching-up' elif self.state == TABLE_WANNA_SYNC: return 'wanna-sync:%d' % self.sync_tick_id elif self.state == TABLE_DO_SYNC: return 'do-sync:%d' % self.sync_tick_id elif self.state == TABLE_OK: return 'ok' def parse_state(self, merge_state): """Read state from string.""" state = -1 if merge_state == None: state = TABLE_MISSING elif merge_state == "in-copy": state = TABLE_IN_COPY elif merge_state == "catching-up": state = TABLE_CATCHING_UP elif merge_state == "ok": state = TABLE_OK elif merge_state == "?": state = TABLE_OK else: tmp = merge_state.split(':') 
if len(tmp) == 2: self.sync_tick_id = int(tmp[1]) if tmp[0] == 'wanna-sync': state = TABLE_WANNA_SYNC elif tmp[0] == 'do-sync': state = TABLE_DO_SYNC if state < 0: raise Exception("Bad table state: %s" % merge_state) return state def loaded_state(self, row): """Update object with info from db.""" self.log.debug("loaded_state: %s: %s / %s", self.name, row['merge_state'], row['custom_snapshot']) self.change_snapshot(row['custom_snapshot'], 0) self.state = self.parse_state(row['merge_state']) self.changed = 0 if row['table_attrs']: self.table_attrs = skytools.db_urldecode(row['table_attrs']) else: self.table_attrs = {} self.copy_role = row['copy_role'] self.dropped_ddl = row['dropped_ddl'] if row['merge_state'] == "?": self.changed = 1 self.copy_pos = int(row.get('copy_pos','0')) self.max_parallel_copy = int(self.table_attrs.get('max_parallel_copy', self.max_parallel_copy)) if 'dest_table' in row and row['dest_table']: self.dest_table = row['dest_table'] else: self.dest_table = self.name hstr = self.table_attrs.get('handlers', '') # compat hstr = self.table_attrs.get('handler', hstr) self.plugin = build_handler(self.name, hstr, self.dest_table) def max_parallel_copies_reached(self): return self.max_parallel_copy and\ self.copy_pos >= self.max_parallel_copy def interesting(self, ev, tick_id, copy_thread, copy_table_name): """Check if table wants this event.""" if copy_thread: if self.name != copy_table_name: return False if self.state not in (TABLE_CATCHING_UP, TABLE_DO_SYNC): return False else: if self.state != TABLE_OK: return False # if no snapshot tracking, then accept always if not self.from_snapshot: return True # uninteresting? 
if self.from_snapshot.contains(ev.txid): return False # after couple interesting batches there no need to check snapshot # as there can be only one partially interesting batch if tick_id != self.last_tick: self.last_tick = tick_id self.ok_batch_count += 1 # disable batch tracking if self.ok_batch_count > 3: self.change_snapshot(None) return True def gc_snapshot(self, copy_thread, prev_tick, cur_tick, no_lag): """Remove attached snapshot if possible. If the event processing is in current moment, the snapshot is not needed beyond next batch. The logic is needed for mostly unchanging tables, where the .ok_batch_count check in .interesting() method can take a lot of time. """ # check if gc is needed if self.str_snapshot is None: return # check if allowed to modify if copy_thread: if self.state != TABLE_CATCHING_UP: return else: if self.state != TABLE_OK: return False # aquire last tick if not self.last_snapshot_tick: if no_lag: self.last_snapshot_tick = cur_tick return # reset snapshot if not needed anymore if self.last_snapshot_tick < prev_tick: self.change_snapshot(None) def get_plugin(self): return self.plugin class Replicator(CascadedWorker): """Replication core. Config options:: ## Parameters for Londiste ## # target database db = dbname=somedb host=127.0.0.1 # extra connect string parameters to add to node public connect strings. 
# useful values: user= sslmode= #remote_extra_connstr = # how many tables can be copied in parallel #parallel_copies = 1 # accept only events for locally present tables #local_only = true ## compare/repair # max amount of time table can be locked #lock_timeout = 10 # compare: sql to use #compare_sql = select count(1) as cnt, sum(hashtext(t.*::text)) as chksum from only _TABLE_ t # workaround for hashtext change between 8.3 and 8.4 #compare_sql = select count(1) as cnt, sum(('x'||substr(md5(t.*::text),1,16))::bit(64)::bigint) as chksum from only _TABLE_ t #compare_fmt = %(cnt)d rows, checksum=%(chksum)s ## Parameters for initial node creation: create-root/branch/leaf ## # These parameters can be given on either command-line or in config # command-line values override config values. Those values are # used only during create time, otherwise they are loaded from database. # Name for local node. #node_name = # public connect string for local node, which other nodes will use # to connect to this one. 
#public_node_location = # connect string for existing node to use as provider #initial_provider_location = """ # batch info cur_tick = 0 prev_tick = 0 copy_table_name = None # filled by Copytable() sql_list = [] current_event = None def __init__(self, args): """Replication init.""" CascadedWorker.__init__(self, 'londiste3', 'db', args) self.table_list = [] self.table_map = {} self.copy_thread = 0 self.set_name = self.queue_name self.used_plugins = {} self.parallel_copies = self.cf.getint('parallel_copies', 1) if self.parallel_copies < 1: raise Exception('Bad value for parallel_copies: %d' % self.parallel_copies) self.consumer_filter = None load_handler_modules(self.cf) def connection_hook(self, dbname, db): if dbname == 'db' and db.server_version >= 80300: curs = db.cursor() curs.execute("set session_replication_role = 'replica'") db.commit() code_check_done = 0 def check_code(self, db): objs = [ skytools.DBFunction("pgq.maint_operations", 0, sql_file = "londiste.maint-upgrade.sql"), ] skytools.db_install(db.cursor(), objs, self.log) db.commit() def process_remote_batch(self, src_db, tick_id, ev_list, dst_db): "All work for a batch. Entry point from SetConsumer." self.current_event = None # this part can play freely with transactions if not self.code_check_done: self.check_code(dst_db) self.code_check_done = 1 self.sync_database_encodings(src_db, dst_db) self.cur_tick = self.batch_info['tick_id'] self.prev_tick = self.batch_info['prev_tick_id'] dst_curs = dst_db.cursor() self.load_table_state(dst_curs) self.sync_tables(src_db, dst_db) self.copy_snapshot_cleanup(dst_db) # only main thread is allowed to restore fkeys if not self.copy_thread: self.restore_fkeys(dst_db) for p in self.used_plugins.values(): p.reset() self.used_plugins = {} # now the actual event processing happens. # they must be done all in one tx in dst side # and the transaction must be kept open so that # the cascade-consumer can save last tick and commit. 
self.sql_list = [] CascadedWorker.process_remote_batch(self, src_db, tick_id, ev_list, dst_db) self.flush_sql(dst_curs) for p in self.used_plugins.values(): p.finish_batch(self.batch_info, dst_curs) self.used_plugins = {} # finalize table changes self.save_table_state(dst_curs) # store event filter if self.cf.getboolean('local_only', False): # create list of tables if self.copy_thread: _filterlist = skytools.quote_literal(self.copy_table_name) else: _filterlist = ','.join(map(skytools.quote_literal, self.table_map.keys())) # build filter meta = "(ev_type like 'pgq.%' or ev_type like 'londiste.%')" if _filterlist: self.consumer_filter = "(%s or (ev_extra1 in (%s)))" % (meta, _filterlist) else: self.consumer_filter = meta else: # no filter self.consumer_filter = None def sync_tables(self, src_db, dst_db): """Table sync loop. Calls appropriate handles, which is expected to return one of SYNC_* constants.""" self.log.debug('Sync tables') while 1: cnt = Counter(self.table_list) if self.copy_thread: res = self.sync_from_copy_thread(cnt, src_db, dst_db) else: res = self.sync_from_main_thread(cnt, src_db, dst_db) if res == SYNC_EXIT: self.log.debug('Sync tables: exit') if self.copy_thread: self.unregister_consumer() src_db.commit() sys.exit(0) elif res == SYNC_OK: return elif res != SYNC_LOOP: raise Exception('Program error') self.log.debug('Sync tables: sleeping') time.sleep(3) dst_db.commit() self.load_table_state(dst_db.cursor()) dst_db.commit() dsync_backup = None def sync_from_main_thread(self, cnt, src_db, dst_db): "Main thread sync logic." 
# This operates on all table, any amount can be in any state ret = SYNC_OK if cnt.do_sync: # wait for copy thread to catch up ret = SYNC_LOOP # we need to do wanna-sync->do_sync with small batches need_dsync = False dsync_ok = True if self.pgq_min_interval or self.pgq_min_count: dsync_ok = False elif self.dsync_backup and self.dsync_backup[0] >= self.cur_tick: dsync_ok = False # now check if do-sync is needed for t in self.get_tables_in_state(TABLE_WANNA_SYNC): # copy thread wants sync, if not behind, do it if self.cur_tick >= t.sync_tick_id: if dsync_ok: self.change_table_state(dst_db, t, TABLE_DO_SYNC, self.cur_tick) ret = SYNC_LOOP else: need_dsync = True # tune batch size if needed if need_dsync: if self.pgq_min_count or self.pgq_min_interval: bak = (self.cur_tick, self.pgq_min_count, self.pgq_min_interval) self.dsync_backup = bak self.pgq_min_count = None self.pgq_min_interval = None elif self.dsync_backup: self.pgq_min_count = self.dsync_backup[1] self.pgq_min_interval = self.dsync_backup[2] self.dsync_backup = None # now handle new copies npossible = self.parallel_copies - cnt.get_copy_count() if cnt.missing and npossible > 0: pmap = self.get_state_map(src_db.cursor()) src_db.commit() for t in self.get_tables_in_state(TABLE_MISSING): if 'copy_node' in t.table_attrs: # should we go and check this node? pass else: # regular provider is used if t.name not in pmap: self.log.warning("Table %s not available on provider", t.name) continue pt = pmap[t.name] if pt.state != TABLE_OK: # or pt.custom_snapshot: # FIXME: does snapsnot matter? 
self.log.info("Table %s not OK on provider, waiting", t.name) continue # don't allow more copies than configured if npossible == 0: break npossible -= 1 # drop all foreign keys to and from this table self.drop_fkeys(dst_db, t.dest_table) # change state after fkeys are dropped thus allowing # failure inbetween self.change_table_state(dst_db, t, TABLE_IN_COPY) # the copy _may_ happen immediately self.launch_copy(t) # there cannot be interesting events in current batch # but maybe there's several tables, lets do them in one go ret = SYNC_LOOP return ret def sync_from_copy_thread(self, cnt, src_db, dst_db): "Copy thread sync logic." # somebody may have done remove-table in the meantime if self.copy_table_name not in self.table_map: self.log.error("copy_sync: lost table: %s", self.copy_table_name) return SYNC_EXIT # This operates on single table t = self.table_map[self.copy_table_name] if t.state == TABLE_DO_SYNC: # these settings may cause copy to miss right tick self.pgq_min_count = None self.pgq_min_interval = None # main thread is waiting, catch up, then handle over if self.cur_tick == t.sync_tick_id: self.change_table_state(dst_db, t, TABLE_OK) return SYNC_EXIT elif self.cur_tick < t.sync_tick_id: return SYNC_OK else: self.log.error("copy_sync: cur_tick=%d sync_tick=%d", self.cur_tick, t.sync_tick_id) raise Exception('Invalid table state') elif t.state == TABLE_WANNA_SYNC: # wait for main thread to react return SYNC_LOOP elif t.state == TABLE_CATCHING_UP: # partition merging if t.copy_role in ('wait-replay', 'lead'): return SYNC_LOOP # copy just finished if t.dropped_ddl: self.restore_copy_ddl(t, dst_db) return SYNC_OK # is there more work? 
if self.work_state: return SYNC_OK # seems we have catched up self.change_table_state(dst_db, t, TABLE_WANNA_SYNC, self.cur_tick) return SYNC_LOOP elif t.state == TABLE_IN_COPY: # table is not copied yet, do it self.do_copy(t, src_db, dst_db) # forget previous value self.work_state = 1 return SYNC_LOOP else: # nothing to do return SYNC_EXIT def restore_copy_ddl(self, ts, dst_db): self.log.info("%s: restoring DDL", ts.name) dst_curs = dst_db.cursor() for ddl in skytools.parse_statements(ts.dropped_ddl): self.log.info(ddl) dst_curs.execute(ddl) q = "select * from londiste.local_set_table_struct(%s, %s, NULL)" self.exec_cmd(dst_curs, q, [self.queue_name, ts.name]) ts.dropped_ddl = None dst_db.commit() # analyze self.log.info("%s: analyze", ts.name) dst_curs.execute("analyze " + skytools.quote_fqident(ts.name)) dst_db.commit() def do_copy(self, tbl, src_db, dst_db): """Callback for actual copy implementation.""" raise Exception('do_copy not implemented') def process_remote_event(self, src_curs, dst_curs, ev): """handle one event""" self.log.debug("New event: id=%s / type=%s / data=%s / extra1=%s", ev.id, ev.type, ev.data, ev.extra1) # set current_event only if processing them one-by-one if self.work_state < 0: self.current_event = ev if ev.type in ('I', 'U', 'D'): self.handle_data_event(ev, dst_curs) elif ev.type[:2] in ('I:', 'U:', 'D:'): self.handle_data_event(ev, dst_curs) elif ev.type == "R": self.flush_sql(dst_curs) self.handle_truncate_event(ev, dst_curs) elif ev.type == 'EXECUTE': self.flush_sql(dst_curs) self.handle_execute_event(ev, dst_curs) elif ev.type == 'londiste.add-table': self.flush_sql(dst_curs) self.add_set_table(dst_curs, ev.data) elif ev.type == 'londiste.remove-table': self.flush_sql(dst_curs) self.remove_set_table(dst_curs, ev.data) elif ev.type == 'londiste.remove-seq': self.flush_sql(dst_curs) self.remove_set_seq(dst_curs, ev.data) elif ev.type == 'londiste.update-seq': self.flush_sql(dst_curs) self.update_seq(dst_curs, ev) else: 
CascadedWorker.process_remote_event(self, src_curs, dst_curs, ev) # no point keeping it around longer self.current_event = None def handle_data_event(self, ev, dst_curs): """handle one data event""" t = self.get_table_by_name(ev.extra1) if not t or not t.interesting(ev, self.cur_tick, self.copy_thread, self.copy_table_name): self.stat_increase('ignored_events') return try: p = self.used_plugins[ev.extra1] except KeyError: p = t.get_plugin() self.used_plugins[ev.extra1] = p p.prepare_batch(self.batch_info, dst_curs) p.process_event(ev, self.apply_sql, dst_curs) def handle_truncate_event(self, ev, dst_curs): """handle one truncate event""" t = self.get_table_by_name(ev.extra1) if not t or not t.interesting(ev, self.cur_tick, self.copy_thread, self.copy_table_name): self.stat_increase('ignored_events') return fqname = skytools.quote_fqident(t.dest_table) try: p = self.used_plugins[ev.extra1] except KeyError: p = t.get_plugin() self.used_plugins[ev.extra1] = p if p.conf.get('ignore_truncate'): self.log.info("ignoring truncate for %s", fqname) return # # Always use CASCADE, because without it the # operation cannot work with FKeys, on both # slave and master. # sql = "TRUNCATE %s CASCADE;" % fqname self.flush_sql(dst_curs) dst_curs.execute(sql) def handle_execute_event(self, ev, dst_curs): """handle one EXECUTE event""" if self.copy_thread: return # parse event fname = ev.extra1 s_attrs = ev.extra2 exec_attrs = ExecAttrs(urlenc = s_attrs) sql = ev.data # fixme: curs? 
pgver = dst_curs.connection.server_version if pgver >= 80300: dst_curs.execute("set local session_replication_role = 'local'") seq_map = {} q = "select seq_name, local from londiste.get_seq_list(%s) where local" dst_curs.execute(q, [self.queue_name]) for row in dst_curs.fetchall(): seq_map[row['seq_name']] = row['seq_name'] tbl_map = {} for tbl, t in self.table_map.items(): tbl_map[t.name] = t.dest_table q = "select * from londiste.execute_start(%s, %s, %s, false, %s)" res = self.exec_cmd(dst_curs, q, [self.queue_name, fname, sql, s_attrs], commit = False) ret = res[0]['ret_code'] if ret > 200: self.log.warning("Skipping execution of '%s'", fname) if pgver >= 80300: dst_curs.execute("set local session_replication_role = 'replica'") return if exec_attrs.need_execute(dst_curs, tbl_map, seq_map): self.log.info("%s: executing sql") xsql = exec_attrs.process_sql(sql, tbl_map, seq_map) for stmt in skytools.parse_statements(xsql): dst_curs.execute(stmt) else: self.log.info("%s: execution not needed on this node") q = "select * from londiste.execute_finish(%s, %s)" self.exec_cmd(dst_curs, q, [self.queue_name, fname], commit = False) if pgver >= 80300: dst_curs.execute("set local session_replication_role = 'replica'") def apply_sql(self, sql, dst_curs): # how many queries to batch together, drop batching on error limit = 200 if self.work_state == -1: limit = 0 self.sql_list.append(sql) if len(self.sql_list) >= limit: self.flush_sql(dst_curs) def flush_sql(self, dst_curs): """Send all buffered statements to DB.""" if len(self.sql_list) == 0: return buf = "\n".join(self.sql_list) self.sql_list = [] dst_curs.execute(buf) def add_set_table(self, dst_curs, tbl): """There was new table added to root, remember it.""" q = "select londiste.global_add_table(%s, %s)" dst_curs.execute(q, [self.set_name, tbl]) def remove_set_table(self, dst_curs, tbl): """There was table dropped from root, remember it.""" if tbl in self.table_map: t = self.table_map[tbl] del self.table_map[tbl] 
self.table_list.remove(t) q = "select londiste.global_remove_table(%s, %s)" dst_curs.execute(q, [self.set_name, tbl]) def remove_set_seq(self, dst_curs, seq): """There was seq dropped from root, remember it.""" q = "select londiste.global_remove_seq(%s, %s)" dst_curs.execute(q, [self.set_name, seq]) def load_table_state(self, curs): """Load table state from database. Todo: if all tables are OK, there is no need to load state on every batch. """ q = "select * from londiste.get_table_list(%s)" curs.execute(q, [self.set_name]) new_list = [] new_map = {} for row in curs.fetchall(): if not row['local']: continue t = self.get_table_by_name(row['table_name']) if not t: t = TableState(row['table_name'], self.log) t.loaded_state(row) new_list.append(t) new_map[t.name] = t self.table_list = new_list self.table_map = new_map def get_state_map(self, curs): """Get dict of table states.""" q = "select * from londiste.get_table_list(%s)" curs.execute(q, [self.set_name]) new_map = {} for row in curs.fetchall(): if not row['local']: continue t = TableState(row['table_name'], self.log) t.loaded_state(row) new_map[t.name] = t return new_map def save_table_state(self, curs): """Store changed table state in database.""" for t in self.table_list: # backwards compat: move plugin-only dest_table to table_info if t.dest_table != t.plugin.dest_table: self.log.info("Overwriting .dest_table from plugin: tbl=%s dst=%s", t.name, t.plugin.dest_table) q = "update londiste.table_info set dest_table = %s"\ " where queue_name = %s and table_name = %s" curs.execute(q, [t.plugin.dest_table, self.set_name, t.name]) if not t.changed: continue merge_state = t.render_state() self.log.info("storing state of %s: copy:%d new_state:%s", t.name, self.copy_thread, merge_state) q = "select londiste.local_set_table_state(%s, %s, %s, %s)" curs.execute(q, [self.set_name, t.name, t.str_snapshot, merge_state]) t.changed = 0 def change_table_state(self, dst_db, tbl, state, tick_id = None): """Chage state for table.""" 
tbl.change_state(state, tick_id) self.save_table_state(dst_db.cursor()) dst_db.commit() self.log.info("Table %s status changed to '%s'", tbl.name, tbl.render_state()) def get_tables_in_state(self, state): "get all tables with specific state" for t in self.table_list: if t.state == state: yield t def get_table_by_name(self, name): """Returns cached state object.""" if name.find('.') < 0: name = "public.%s" % name if name in self.table_map: return self.table_map[name] return None def launch_copy(self, tbl_stat): """Run parallel worker for copy.""" self.log.info("Launching copy process") script = sys.argv[0] conf = self.cf.filename cmd = [script, conf, 'copy', tbl_stat.name, '-d'] # pass same verbosity options as main script got if self.options.quiet: cmd.append('-q') if self.options.verbose: cmd += ['-v'] * self.options.verbose # let existing copy finish and clean its pidfile, # otherwise new copy will exit immediately. # FIXME: should not happen on per-table pidfile ??? copy_pidfile = "%s.copy.%s" % (self.pidfile, tbl_stat.name) while skytools.signal_pidfile(copy_pidfile, 0): self.log.warning("Waiting for existing copy to exit") time.sleep(2) # launch and wait for daemonization result self.log.debug("Launch args: %r", cmd) res = os.spawnvp(os.P_WAIT, script, cmd) self.log.debug("Launch result: %r", res) if res != 0: self.log.error("Failed to launch copy process, result=%d", res) def sync_database_encodings(self, src_db, dst_db): """Make sure client_encoding is same on both side.""" try: # psycopg2 if src_db.encoding != dst_db.encoding: dst_db.set_client_encoding(src_db.encoding) except AttributeError: # psycopg1 src_curs = src_db.cursor() dst_curs = dst_db.cursor() src_curs.execute("show client_encoding") src_enc = src_curs.fetchone()[0] dst_curs.execute("show client_encoding") dst_enc = dst_curs.fetchone()[0] if src_enc != dst_enc: dst_curs.execute("set client_encoding = %s", [src_enc]) def copy_snapshot_cleanup(self, dst_db): """Remove unnecessary snapshot info 
from tables.""" no_lag = not self.work_state changes = False for t in self.table_list: t.gc_snapshot(self.copy_thread, self.prev_tick, self.cur_tick, no_lag) if t.changed: changes = True if changes: self.save_table_state(dst_db.cursor()) dst_db.commit() def restore_fkeys(self, dst_db): """Restore fkeys that have both tables on sync.""" dst_curs = dst_db.cursor() # restore fkeys -- one at a time q = "select * from londiste.get_valid_pending_fkeys(%s)" dst_curs.execute(q, [self.set_name]) fkey_list = dst_curs.fetchall() for row in fkey_list: self.log.info('Creating fkey: %(fkey_name)s (%(from_table)s --> %(to_table)s)' % row) q2 = "select londiste.restore_table_fkey(%(from_table)s, %(fkey_name)s)" dst_curs.execute(q2, row) dst_db.commit() def drop_fkeys(self, dst_db, table_name): """Drop all foreign keys to and from this table. They need to be dropped one at a time to avoid deadlocks with user code. """ dst_curs = dst_db.cursor() q = "select * from londiste.find_table_fkeys(%s)" dst_curs.execute(q, [table_name]) fkey_list = dst_curs.fetchall() for row in fkey_list: self.log.info('Dropping fkey: %s' % row['fkey_name']) q2 = "select londiste.drop_table_fkey(%(from_table)s, %(fkey_name)s)" dst_curs.execute(q2, row) dst_db.commit() def process_root_node(self, dst_db): """On root node send seq changes to queue.""" CascadedWorker.process_root_node(self, dst_db) q = "select * from londiste.root_check_seqs(%s)" self.exec_cmd(dst_db, q, [self.queue_name]) def update_seq(self, dst_curs, ev): if self.copy_thread: return val = int(ev.data) seq = ev.extra1 q = "select * from londiste.global_update_seq(%s, %s, %s)" self.exec_cmd(dst_curs, q, [self.queue_name, seq, val]) def copy_event(self, dst_curs, ev, filtered_copy): # send only data events down (skipping seqs also) if filtered_copy: if ev.type[:9] in ('londiste.',): return CascadedWorker.copy_event(self, dst_curs, ev, filtered_copy) def exception_hook(self, det, emsg): # add event info to error message if self.current_event: 
ev = self.current_event info = "[ev_id=%d,ev_txid=%d] " % (ev.ev_id,ev.ev_txid) emsg = info + emsg super(Replicator, self).exception_hook(det, emsg) if __name__ == '__main__': script = Replicator(sys.argv[1:]) script.start() skytools-3.2.6/python/londiste/__init__.py0000644000000000000000000000113112426435645015561 0ustar """Replication on top of PgQ.""" __pychecker__ = 'no-miximport' import londiste.playback import londiste.compare import londiste.setup import londiste.table_copy import londiste.repair import londiste.handler from londiste.playback import * from londiste.compare import * from londiste.setup import * from londiste.table_copy import * from londiste.repair import * from londiste.handler import * __all__ = ( londiste.playback.__all__ + londiste.compare.__all__ + londiste.handler.__all__ + londiste.setup.__all__ + londiste.table_copy.__all__ + londiste.repair.__all__ ) skytools-3.2.6/python/setadm.py0000755000000000000000000000045312426435645013467 0ustar #! /usr/bin/env python """SetAdmin launcher. """ import sys import pkgloader pkgloader.require('skytools', '3.0') import pgq.cascade.admin if __name__ == '__main__': script = pgq.cascade.admin.CascadeAdmin('cascade_admin', 'node_db', sys.argv[1:], worker_setup = False) script.start() skytools-3.2.6/python/qadmin.py0000755000000000000000000013170512426435645013470 0ustar #! /usr/bin/env python """Commands that require only database connection: connect dbname=.. host=.. service=.. queue=..; connect [ queue=.. ] [ node=.. ]; install pgq | londiste; show queue [ ]; create queue ; alter queue set param = , ...; drop queue ; show consumer [ [on ] ]; register consumer [on | at | copy ]* ; unregister consumer [from ]; register subconsumer for [on ]; unregister subconsumer for [from ] [close [batch]]; show node [ [on ] ]; show table ; show sequence ; Following commands expect default queue: show batch ; show batch ; Londiste commands: londiste add table [ , ... 
] with skip_truncate, tgflags='UIDBAQL', expect_sync, no_triggers, -- pass trigger args: backup, skip, when='EXPR', ev_XX='EXPR'; londiste add sequence ; londiste remove table [ , ... ]; londiste remove sequence [ , ... ]; londiste tables; londiste seqs; londiste missing; Other commands: exit; - quit program ^D - quit program ^C - clear current buffer """ # unimplemented: """ create node location [on ]; drop node [on ]; alter node [location=] show_queue_stats ; change provider drop node status rename node node create create root_node ; create branch_node ; create leaf_node ; alter node provider ; alter node takeover with all; alter node rename ; takeover ; drop node ; show node [ [on ] ]; show cascade; """ cmdline_usage = '''\ Usage: qadmin [switches] Initial connection options: -h host -p port -U user -d dbname -Q queuename Command options: -c cmd_string -f execfile General options: --help --version ''' import sys, os, readline, getopt, re, psycopg2, traceback import pkgloader pkgloader.require('skytools', '3.0') import skytools __version__ = skytools.__version__ script = None IGNORE_HOSTS = { 'ip6-allhosts': 1, 'ip6-allnodes': 1, 'ip6-allrouters': 1, #'ip6-localhost': 1, 'ip6-localnet': 1, 'ip6-loopback': 1, 'ip6-mcastprefix': 1, } _ident_rx =''' ( " ( "" | [^"]+ )* " ) | ( [a-z_][a-z0-9_]* ) | [.] | (?P .) 
''' _ident_rc = re.compile(_ident_rx, re.X | re.I) def unquote_any(typ, s): global _ident_rc if typ == 'ident': res = [] pos = 0 while 1: m = _ident_rc.match(s, pos) if not m: break if m.group('err'): raise Exception('invalid syntax for ident') s1 = m.group() if s1[0] == '"': s1 = s1[1:-1].replace('""', '"') res.append(s1) pos = m.end() s = ''.join(res) elif typ == 'str' or typ == 'dolq': s = skytools.unquote_literal(s, True) return s def normalize_any(typ, s): if typ == 'ident' and s.find('"') < 0: s = s.lower() return s def display_result(curs, desc, fields = []): """Display multirow query as a table.""" rows = curs.fetchall() if not fields: fields = [f[0] for f in curs.description] widths = [10] * len(fields) for i, f in enumerate(fields): rlen = len(f) if rlen > widths[i]: widths[i] = rlen for row in rows: for i, k in enumerate(fields): rlen = row[k] and len(str(row[k])) or 0 if rlen > widths[i]: widths[i] = rlen widths = [w + 2 for w in widths] fmt = '%%-%ds' * (len(widths) - 1) + '%%s' fmt = fmt % tuple(widths[:-1]) if desc: print(desc) print(fmt % tuple(fields)) print(fmt % tuple([ '-' * (w - 2) for w in widths ])) for row in rows: print(fmt % tuple([row[k] for k in fields])) print('') ## ## Base token classes ## class Token: """Base class for tokens. The optional 'param' kwarg will set corresponding key in 'params' dict to final token value. 
""" # string to append to completions c_append = ' ' # token type to accept tk_type = ("ident", "dolq", "str", "num", "sym") # skipped: numarg, pyold, pynew def __init__(self, next = None, name = None, append = 0): self.next = next self.name = name self._append = append # top-level api def get_next(self, typ, word, params): """Return next token if 'word' matches this token.""" if not self.is_acceptable(typ, word): return None self.set_param(typ, word, params) return self.next def get_completions(self, params): """Return list of all completions possible at this point.""" wlist = self.get_wlist() comp_list = [w + self.c_append for w in wlist] return comp_list # internal api def get_wlist(self): """Return list of potential words at this point.""" return [] def set_param(self, typ, word, params): # now set special param if not self.name: return uw = unquote_any(typ, word) if self._append: lst = params.setdefault(self.name, []) lst.append(uw) else: params[self.name] = uw def is_acceptable(self, tok, word): if tok not in self.tk_type: return False return True class Exact(Token): """Single fixed token.""" def __init__(self, value, next, **kwargs): Token.__init__(self, next, **kwargs) self.value = value def get_wlist(self): return [self.value] def is_acceptable(self, typ, word): if not Token.is_acceptable(self, typ, word): return False return word == self.value class List(Token): """List of Tokens, will be tried sequentially until one matches.""" def __init__(self, *args, **kwargs): Token.__init__(self, **kwargs) self.tok_list = list(args) def add(self, *args): for a in args: self.tok_list.append(a) def get_next(self, typ, word, params): for w in self.tok_list: n = w.get_next(typ, word, params) if n: self.set_param(typ, word, params) return n return None def get_completions(self, params): comp_list = [] for w in self.tok_list: comp_list += w.get_completions(params) return comp_list ## ## Dynamic token classes ## class ConnstrPassword(Token): tk_type = ("str", "num", 
"ident") class StrValue(Token): tk_type = ("str",) class NumValue(Token): tk_type = ("num",) class Word(Exact): """Single fixed keyword.""" tk_type = ("ident",) class Name(Token): """Dynamically generated list of idents.""" tk_type = ("ident") class Symbol(Exact): """Single fixed symbol.""" tk_type = ("sym",) c_append = '' class XSymbol(Symbol): """Symbol that is not shown in completion.""" def get_wlist(self): return [] class SubConsumerName(Token): tk_type = ("str", "num", "ident") # data-dependant completions class Queue(Name): def get_wlist(self): return script.get_queue_list() class Consumer(Name): def get_wlist(self): return script.get_consumer_list() class DBNode(Name): def get_wlist(self): return script.get_node_list() class Database(Name): def get_wlist(self): return script.get_database_list() class Host(Name): def get_wlist(self): return script.get_host_list() class User(Name): def get_wlist(self): return script.get_user_list() class NewTable(Name): def get_wlist(self): return script.get_new_table_list() class KnownTable(Name): def get_wlist(self): return script.get_known_table_list() class PlainTable(Name): def get_wlist(self): return script.get_plain_table_list() class PlainSequence(Name): def get_wlist(self): return script.get_plain_seq_list() class NewSeq(Name): def get_wlist(self): return script.get_new_seq_list() class KnownSeq(Name): def get_wlist(self): return script.get_known_seq_list() class BatchId(NumValue): def get_wlist(self): return script.get_batch_list() class TickId(NumValue): def get_wlist(self): return [] class Port(NumValue): def get_wlist(self): return ['5432', '6432'] # easier completion - add follow-up symbols class WordEQ(Word): """Word that is followed by '='.""" c_append = '=' def __init__(self, word, next, **kwargs): next = Symbol('=', next) Word.__init__(self, word, next, **kwargs) class WordEQQ(Word): """Word that is followed by '=' and string.""" c_append = "='" def __init__(self, word, next, **kwargs): next = Symbol('=', 
next) Word.__init__(self, word, next, **kwargs) ## ## Now describe the syntax. ## top_level = List(name = 'cmd') w_done = Symbol(';', top_level) w_xdone = XSymbol(';', top_level) w_sql = List(w_done) w_sql.add(Token(w_sql)) w_connect = List() w_connect.add( WordEQ('dbname', Database(w_connect, name = 'dbname')), WordEQ('host', Host(w_connect, name = 'host')), WordEQ('port', Port(w_connect, name = 'port')), WordEQ('user', User(w_connect, name = 'user')), WordEQ('password', ConnstrPassword(w_connect, name = 'password')), WordEQ('queue', Queue(w_connect, name = 'queue')), WordEQ('node', DBNode(w_connect, name = 'node')), w_done) w_show_batch = List( BatchId(w_done, name = 'batch_id'), Consumer(w_done, name = 'consumer')) w_show_queue = List( Symbol('*', w_done, name = 'queue'), Queue(w_done, name = 'queue'), w_done) w_show_on_queue = List( Symbol('*', w_done, name = 'queue'), Queue(w_done, name = 'queue'), ) w_on_queue = List(Word('on', w_show_on_queue), w_done) w_show_consumer = List( Symbol('*', w_on_queue, name = 'consumer'), Consumer(w_on_queue, name = 'consumer'), w_done) w_show_node = List( Symbol('*', w_on_queue, name = 'node'), DBNode(w_on_queue, name = 'node'), w_done) w_show_table = PlainTable(w_done, name = 'table') w_show_seq = PlainSequence(w_done, name = 'seq') w_show = List( Word('batch', w_show_batch), Word('help', w_done), Word('queue', w_show_queue), Word('consumer', w_show_consumer), Word('node', w_show_node), Word('table', w_show_table), Word('sequence', w_show_seq), Word('version', w_done), name = "cmd2") w_install = List( Word('pgq', w_done), Word('londiste', w_done), name = 'module') # alter queue w_qargs2 = List() w_qargs = List( WordEQQ('idle_period', StrValue(w_qargs2, name = 'ticker_idle_period')), WordEQ('max_count', NumValue(w_qargs2, name = 'ticker_max_count')), WordEQQ('max_lag', StrValue(w_qargs2, name = 'ticker_max_lag')), WordEQ('paused', NumValue(w_qargs2, name = 'ticker_paused'))) w_qargs2.add(w_done) w_qargs2.add(Symbol(',', 
w_qargs)) w_set_q = Word('set', w_qargs) w_alter_q = List( Symbol('*', w_set_q, name = 'queue'), Queue(w_set_q, name = 'queue')) # alter w_alter = List( Word('queue', w_alter_q), w_sql, name = 'cmd2') # create w_create = List( Word('queue', Queue(w_done, name = 'queue')), w_sql, name = 'cmd2') # drop w_drop = List( Word('queue', Queue(w_done, name = 'queue')), w_sql, name = 'cmd2') # register w_reg_target = List() w_reg_target.add( Word('on', Queue(w_reg_target, name = 'queue')), Word('copy', Consumer(w_reg_target, name = 'copy_reg')), Word('at', TickId(w_reg_target, name = 'at_tick')), w_done) w_cons_on_queue = Word('consumer', Consumer(w_reg_target, name = 'consumer'), name = 'cmd2') w_sub_reg_target = List() w_sub_reg_target.add( Word('on', Queue(w_sub_reg_target, name = 'queue')), Word('for', Consumer(w_sub_reg_target, name = 'consumer')), w_done) w_subcons_on_queue = Word('subconsumer', SubConsumerName(w_sub_reg_target, name = 'subconsumer'), name = 'cmd2') w_register = List(w_cons_on_queue, w_subcons_on_queue) # unregister w_from_queue = List(w_done, Word('from', Queue(w_done, name = 'queue'))) w_cons_from_queue = Word('consumer', List( Symbol('*', w_from_queue, name = 'consumer'), Consumer(w_from_queue, name = 'consumer') ), name = 'cmd2') w_done_close = List(w_done, Word('close', List(w_done, Word('batch', w_done)), name = 'close')) w_from_queue_close = List(w_done_close, Word('from', Queue(w_done_close, name = 'queue'))) w_con_from_queue = Consumer(w_from_queue_close, name = 'consumer') w_subcons_from_queue = Word('subconsumer', List( Symbol('*', Word('for', w_con_from_queue), name = 'subconsumer'), SubConsumerName(Word('for', w_con_from_queue), name = 'subconsumer') ), name = 'cmd2') w_unregister = List(w_cons_from_queue, w_subcons_from_queue) # londiste add table w_table_with2 = List() w_table_with = List( Word('skip_truncate', w_table_with2, name = 'skip_truncate'), Word('expect_sync', w_table_with2, name = 'expect_sync'), Word('backup', w_table_with2, 
name = 'backup'), Word('skip', w_table_with2, name = 'skip'), Word('no_triggers', w_table_with2, name = 'no_triggers'), WordEQQ('ev_ignore', StrValue(w_table_with2, name = 'ignore')), WordEQQ('ev_type', StrValue(w_table_with2, name = 'ev_type')), WordEQQ('ev_data', StrValue(w_table_with2, name = 'ev_data')), WordEQQ('ev_extra1', StrValue(w_table_with2, name = 'ev_extra1')), WordEQQ('ev_extra2', StrValue(w_table_with2, name = 'ev_extra2')), WordEQQ('ev_extra3', StrValue(w_table_with2, name = 'ev_extra3')), WordEQQ('ev_extra4', StrValue(w_table_with2, name = 'ev_extra4')), WordEQQ('pkey', StrValue(w_table_with2, name = 'pkey')), WordEQQ('when', StrValue(w_table_with2, name = 'when')), WordEQQ('tgflags', StrValue(w_table_with2, name = 'tgflags')) ) w_table_with2.add(w_done) w_table_with2.add(Symbol(',', w_table_with)) w_londiste_add_table = List() w_londiste_add_table2 = List( Symbol(',', w_londiste_add_table), Word('with', w_table_with), w_done) w_londiste_add_table.add( NewTable(w_londiste_add_table2, name = 'tables', append = 1)) # londiste add seq w_londiste_add_seq = List() w_londiste_add_seq2 = List( Symbol(',', w_londiste_add_seq), w_done) w_londiste_add_seq.add( NewSeq(w_londiste_add_seq2, name = 'seqs', append = 1)) # londiste remove table w_londiste_remove_table = List() w_londiste_remove_table2 = List( Symbol(',', w_londiste_remove_table), w_done) w_londiste_remove_table.add( KnownTable(w_londiste_remove_table2, name = 'tables', append = 1)) # londiste remove sequence w_londiste_remove_seq = List() w_londiste_remove_seq2 = List( Symbol(',', w_londiste_remove_seq), w_done) w_londiste_remove_seq.add( KnownSeq(w_londiste_remove_seq2, name = 'seqs', append = 1)) w_londiste_add = List( Word('table', w_londiste_add_table), Word('sequence', w_londiste_add_seq), name = 'cmd3') w_londiste_remove = List( Word('table', w_londiste_remove_table), Word('sequence', w_londiste_remove_seq), name = 'cmd3') # londiste w_londiste = List( Word('add', w_londiste_add), 
Word('remove', w_londiste_remove), Word('missing', w_done), Word('tables', w_done), Word('seqs', w_done), name = "cmd2") top_level.add( Word('alter', w_alter), Word('connect', w_connect), Word('create', w_create), Word('drop', w_drop), Word('install', w_install), Word('register', w_register), Word('unregister', w_unregister), Word('show', w_show), Word('exit', w_done), Word('londiste', w_londiste), Word('select', w_sql), w_sql) ## ## Main class for keeping the state. ## class AdminConsole: cur_queue = None cur_database = None server_version = None pgq_version = None cmd_file = None cmd_str = None comp_cache = { 'comp_pfx': None, 'comp_list': None, 'queue_list': None, 'database_list': None, 'consumer_list': None, 'host_list': None, 'user_list': None, } db = None initial_connstr = None rc_hosts = re.compile('\s+') def get_queue_list(self): q = "select queue_name from pgq.queue order by 1" return self._ccache('queue_list', q, 'pgq') def get_database_list(self): q = "select datname from pg_catalog.pg_database order by 1" return self._ccache('database_list', q) def get_user_list(self): q = "select usename from pg_catalog.pg_user order by 1" return self._ccache('user_list', q) def get_consumer_list(self): q = "select co_name from pgq.consumer order by 1" return self._ccache('consumer_list', q, 'pgq') def get_node_list(self): q = "select distinct node_name from pgq_node.node_location order by 1" return self._ccache('node_list', q, 'pgq_node') def _new_obj_sql(self, queue, objname, objkind): args = {'queue': skytools.quote_literal(queue), 'obj': objname, 'ifield': objname + '_name', 'itable': 'londiste.' + objname + '_info', 'kind': skytools.quote_literal(objkind), } q = """select quote_ident(n.nspname) || '.' || quote_ident(r.relname) from pg_catalog.pg_class r join pg_catalog.pg_namespace n on (n.oid = r.relnamespace) left join %(itable)s i on (i.queue_name = %(queue)s and i.%(ifield)s = (n.nspname || '.' 
|| r.relname)) where r.relkind = %(kind)s and n.nspname not in ('pg_catalog', 'information_schema', 'pgq', 'londiste', 'pgq_node', 'pgq_ext') and n.nspname !~ 'pg_.*' and i.%(ifield)s is null union all select londiste.quote_fqname(%(ifield)s) from %(itable)s where queue_name = %(queue)s and not local order by 1 """ % args return q def get_new_table_list(self): if not self.cur_queue: return [] q = self._new_obj_sql(self.cur_queue, 'table', 'r') return self._ccache('new_table_list', q, 'londiste') def get_new_seq_list(self): if not self.cur_queue: return [] q = self._new_obj_sql(self.cur_queue, 'seq', 'S') return self._ccache('new_seq_list', q, 'londiste') def get_known_table_list(self): if not self.cur_queue: return [] qname = skytools.quote_literal(self.cur_queue) q = "select londiste.quote_fqname(table_name)"\ " from londiste.table_info"\ " where queue_name = %s order by 1" % qname return self._ccache('known_table_list', q, 'londiste') def get_known_seq_list(self): if not self.cur_queue: return [] qname = skytools.quote_literal(self.cur_queue) q = "select londiste.quote_fqname(seq_name)"\ " from londiste.seq_info"\ " where queue_name = %s order by 1" % qname return self._ccache('known_seq_list', q, 'londiste') def get_plain_table_list(self): q = "select quote_ident(n.nspname) || '.' || quote_ident(r.relname)"\ " from pg_class r join pg_namespace n on (n.oid = r.relnamespace)"\ " where r.relkind = 'r' "\ " and n.nspname not in ('pg_catalog', 'information_schema', 'pgq', 'londiste', 'pgq_node', 'pgq_ext') "\ " and n.nspname !~ 'pg_.*' "\ " order by 1" return self._ccache('plain_table_list', q) def get_plain_seq_list(self): q = "select quote_ident(n.nspname) || '.' 
|| quote_ident(r.relname)"\ " from pg_class r join pg_namespace n on (n.oid = r.relnamespace)"\ " where r.relkind = 'S' "\ " and n.nspname not in ('pg_catalog', 'information_schema', 'pgq', 'londiste', 'pgq_node', 'pgq_ext') "\ " order by 1" return self._ccache('plain_seq_list', q) def get_batch_list(self): if not self.cur_queue: return [] qname = skytools.quote_literal(self.cur_queue) q = "select current_batch::text from pgq.get_consumer_info(%s)"\ " where current_batch is not null order by 1" % qname return self._ccache('batch_list', q, 'pgq') def _ccache(self, cname, q, req_schema = None): if not self.db: return [] # check if schema exists if req_schema: k = "schema_exists_%s" % req_schema ok = self.comp_cache.get(k) if ok is None: curs = self.db.cursor() ok = skytools.exists_schema(curs, req_schema) self.comp_cache[k] = ok if not ok: return [] # actual completion clist = self.comp_cache.get(cname) if clist is None: curs = self.db.cursor() curs.execute(q) clist = [r[0] for r in curs.fetchall()] self.comp_cache[cname] = clist return clist def get_host_list(self): clist = self.comp_cache.get('host_list') if clist is None: try: f = open('/etc/hosts', 'r') clist = [] while 1: ln = f.readline() if not ln: break ln = ln.strip() if ln == '' or ln[0] == '#': continue lst = self.rc_hosts.split(ln) for h in lst[1:]: if h not in IGNORE_HOSTS: clist.append(h) clist.sort() self.comp_cache['host_list'] = clist except IOError: clist = [] return clist def parse_cmdline(self, argv): switches = "c:h:p:d:U:f:Q:" lswitches = ['help', 'version'] try: opts, args = getopt.getopt(argv, switches, lswitches) except getopt.GetoptError, ex: print str(ex) print "Use --help to see command line options" sys.exit(1) cstr_map = { 'dbname': None, 'host': None, 'port': None, 'user': None, 'password': None, } cmd_file = cmd_str = None for o, a in opts: if o == "--help": print cmdline_usage sys.exit(0) elif o == "--version": print "qadmin version %s" % __version__ sys.exit(0) elif o == "-h": 
cstr_map['host'] = a elif o == "-p": cstr_map['port'] = a elif o == "-d": cstr_map['dbname'] = a elif o == "-U": cstr_map['user'] = a elif o == "-Q": self.cur_queue = a elif o == "-c": self.cmd_str = a elif o == "-f": self.cmd_file = a cstr_list = [] for k, v in cstr_map.items(): if v is not None: cstr_list.append("%s=%s" % (k, v)) if len(args) == 1: a = args[0] if a.find('=') >= 0: cstr_list.append(a) else: cstr_list.append("dbname=%s" % a) elif len(args) > 1: print "too many arguments, use --help to see syntax" sys.exit(1) self.initial_connstr = " ".join(cstr_list) def db_connect(self, connstr, quiet=False): db = skytools.connect_database(connstr) db.set_isolation_level(0) # autocommit q = "select current_database(), current_setting('server_version')" curs = db.cursor() curs.execute(q) res = curs.fetchone() self.cur_database = res[0] self.server_version = res[1] q = "select pgq.version()" try: curs.execute(q) res = curs.fetchone() self.pgq_version = res[0] except psycopg2.ProgrammingError: self.pgq_version = "" if not quiet: print "qadmin (%s, server %s, pgq %s)" % (__version__, self.server_version, self.pgq_version) #print "Connected to %r" % connstr return db def run(self, argv): self.parse_cmdline(argv) if self.cmd_file is not None and self.cmd_str is not None: print "cannot handle -c and -f together" sys.exit(1) # append ; to cmd_str if needed if self.cmd_str and not self.cmd_str.rstrip().endswith(';'): self.cmd_str += ';' cmd_str = self.cmd_str if self.cmd_file: cmd_str = open(self.cmd_file, "r").read() try: self.db = self.db_connect(self.initial_connstr, quiet=True) except psycopg2.Error, d: print str(d).strip() sys.exit(1) if cmd_str: self.exec_string(cmd_str) else: self.main_loop() def main_loop(self): readline.parse_and_bind('tab: complete') readline.set_completer(self.rl_completer_safe) #print 'delims: ', repr(readline.get_completer_delims()) # remove " from delims #readline.set_completer_delims(" \t\n`~!@#$%^&*()-=+[{]}\\|;:',<>/?") hist_file = 
os.path.expanduser("~/.qadmin_history") try: readline.read_history_file(hist_file) except IOError: pass print "Welcome to qadmin %s (server %s), the PgQ interactive terminal." % (__version__, self.server_version) print "Use 'show help;' to see available commands." while 1: try: ln = self.line_input() self.exec_string(ln) except KeyboardInterrupt: print except EOFError: print break except psycopg2.Error, d: print 'ERROR:', str(d).strip() except Exception: traceback.print_exc() self.reset_comp_cache() try: readline.write_history_file(hist_file) except IOError: pass def rl_completer(self, curword, state): curline = readline.get_line_buffer() start = readline.get_begidx() end = readline.get_endidx() pfx = curline[:start] sglist = self.find_suggestions(pfx, curword) if state < len(sglist): return sglist[state] return None def rl_completer_safe(self, curword, state): try: return self.rl_completer(curword, state) except BaseException, det: print 'got some error', str(det) def line_input(self): qname = "(noqueue)" if self.cur_queue: qname = self.cur_queue p = "%s@%s> " % (qname, self.cur_database) return raw_input(p) def sql_words(self, sql): return skytools.sql_tokenizer(sql, standard_quoting = True, fqident = True, show_location = True, ignore_whitespace = True) def reset_comp_cache(self): self.comp_cache = {} def find_suggestions(self, pfx, curword, params = {}): # refresh word cache c_pfx = self.comp_cache.get('comp_pfx') c_list = self.comp_cache.get('comp_list', []) c_pos = self.comp_cache.get('comp_pos') if c_pfx != pfx: c_list, c_pos = self.find_suggestions_real(pfx, params) orig_pos = c_pos while c_pos < len(pfx) and pfx[c_pos].isspace(): c_pos += 1 #print repr(pfx), orig_pos, c_pos self.comp_cache['comp_pfx'] = pfx self.comp_cache['comp_list'] = c_list self.comp_cache['comp_pos'] = c_pos skip = len(pfx) - c_pos if skip: curword = pfx[c_pos : ] + curword # generate suggestions wlen = len(curword) res = [] for cword in c_list: if curword == cword[:wlen]: 
res.append(cword) # resync with readline offset if skip: res = [s[skip:] for s in res] #print '\nfind_suggestions', repr(pfx), repr(curword), repr(res), repr(c_list) return res def find_suggestions_real(self, pfx, params): # find level node = top_level pos = 0 xpos = 0 xnode = node for typ, w, pos in self.sql_words(pfx): w = normalize_any(typ, w) node = node.get_next(typ, w, params) if not node: break xnode = node xpos = pos # find possible matches if xnode: return (xnode.get_completions(params), xpos) else: return ([], xpos) def exec_string(self, ln, eof = False): node = top_level params = {} self.tokens = [] for typ, w, pos in self.sql_words(ln): self.tokens.append((typ, w)) w = normalize_any(typ, w) if typ == 'error': print 'syntax error 1:', repr(ln) return onode = node node = node.get_next(typ, w, params) if not node: print "syntax error 2:", repr(ln), repr(typ), repr(w), repr(params) return if node == top_level: self.exec_params(params) params = {} self.tokens = [] if eof: if params: self.exec_params(params) elif node != top_level: print "multi-line commands not supported:", repr(ln) def exec_params(self, params): #print 'RUN', params cmd = params.get('cmd') cmd2 = params.get('cmd2') cmd3 = params.get('cmd3') if not cmd: print 'parse error: no command found' return if cmd2: cmd = "%s_%s" % (cmd, cmd2) if cmd3: cmd = "%s_%s" % (cmd, cmd3) #print 'RUN', repr(params) fn = getattr(self, 'cmd_' + cmd, self.execute_sql) fn(params) def cmd_connect(self, params): qname = params.get('queue', self.cur_queue) if 'node' in params and not qname: print 'node= needs a queue also' return # load raw connection params cdata = [] for k in ('dbname', 'host', 'port', 'user', 'password'): if k in params: arg = "%s=%s" % (k, params[k]) cdata.append(arg) # raw connect if cdata: if 'node' in params: print 'node= cannot be used together with raw params' return cstr = " ".join(cdata) self.db = self.db_connect(cstr) # connect to queue if qname: curs = self.db.cursor() q = "select 
queue_name from pgq.get_queue_info(%s)" curs.execute(q, [qname]) res = curs.fetchall() if len(res) == 0: print 'queue not found' return if 'node' in params: q = "select node_location from pgq_node.get_queue_locations(%s)"\ " where node_name = %s" curs.execute(q, [qname, params['node']]) res = curs.fetchall() if len(res) == 0: print "node not found" return cstr = res[0]['node_location'] self.db = self.db_connect(cstr) # set default queue if 'queue' in params: self.cur_queue = qname print "CONNECT" def cmd_show_version (self, params): print "qadmin version %s" % __version__ print "server version %s" % self.server_version print "pgq version %s" % self.pgq_version def cmd_install(self, params): pgq_objs = [ skytools.DBLanguage("plpgsql"), #skytools.DBFunction("txid_current_snapshot", 0, sql_file="txid.sql"), skytools.DBSchema("pgq", sql_file="pgq.sql"), skytools.DBSchema("pgq_ext", sql_file="pgq_ext.sql"), skytools.DBSchema("pgq_node", sql_file="pgq_node.sql"), skytools.DBSchema("pgq_coop", sql_file="pgq_coop.sql"), ] londiste_objs = pgq_objs + [ skytools.DBSchema("londiste", sql_file="londiste.sql"), ] mod_map = { 'londiste': londiste_objs, 'pgq': pgq_objs, } mod_name = params['module'] objs = mod_map[mod_name] if not self.db: print "no db?" 
return curs = self.db.cursor() skytools.db_install(curs, objs, None) print "INSTALL" def cmd_show_queue(self, params): queue = params.get('queue') if queue is None: # "show queue" without args, show all if not connected to # specific queue queue = self.cur_queue if not queue: queue = '*' curs = self.db.cursor() fields = [ "queue_name", "queue_cur_table || '/' || queue_ntables as tables", "queue_ticker_max_count as max_count", "queue_ticker_max_lag as max_lag", "queue_ticker_idle_period as idle_period", "queue_ticker_paused as paused", "ticker_lag", "ev_per_sec", "ev_new", ] pfx = "select " + ",".join(fields) if queue == '*': q = pfx + " from pgq.get_queue_info()" curs.execute(q) else: q = pfx + " from pgq.get_queue_info(%s)" curs.execute(q, [queue]) display_result(curs, 'Queue "%s":' % queue) def cmd_show_consumer(self, params): """Show consumer status""" consumer = params.get('consumer', '*') queue = params.get('queue', '*') q_queue = (queue != '*' and queue or None) q_consumer = (consumer != '*' and consumer or None) curs = self.db.cursor() q = "select * from pgq.get_consumer_info(%s, %s)" curs.execute(q, [q_queue, q_consumer]) display_result(curs, 'Consumer "%s" on queue "%s":' % (consumer, queue)) def cmd_show_node(self, params): """Show node information.""" # TODO: This should additionally show node roles, lags and hierarchy. # Similar to londiste "status". 
node = params.get('node', '*') queue = params.get('queue', '*') q_queue = (queue != '*' and queue or None) q_node = (node != '*' and node or None) curs = self.db.cursor() q = """select queue_name, node_name, node_location, dead from pgq_node.node_location where node_name = coalesce(%s, node_name) and queue_name = coalesce(%s, queue_name) order by 1,2""" curs.execute(q, [q_node, q_queue]) display_result(curs, 'Node "%s" on queue "%s":' % (node, queue)) def cmd_show_batch(self, params): batch_id = params.get('batch_id') consumer = params.get('consumer') queue = self.cur_queue if not queue: print 'No default queue' return curs = self.db.cursor() if consumer: q = "select current_batch from pgq.get_consumer_info(%s, %s)" curs.execute(q, [queue, consumer]) res = curs.fetchall() if len(res) != 1: print 'no such consumer' return batch_id = res[0]['current_batch'] if batch_id is None: print 'consumer has no open batch' return q = "select * from pgq.get_batch_events(%s)" curs.execute(q, [batch_id]) display_result(curs, 'Batch events:') def cmd_register_consumer(self, params): queue = params.get("queue", self.cur_queue) if not queue: print 'No queue specified' return at_tick = params.get('at_tick') copy_reg = params.get('copy_reg') consumer = params['consumer'] curs = self.db.cursor() # copy other registration if copy_reg: q = "select coalesce(next_tick, last_tick) as pos from pgq.get_consumer_info(%s, %s)" curs.execute(q, [queue, copy_reg]) reg = curs.fetchone() if not reg: print "Consumer %s not registered on queue %d" % (copy_reg, queue) return at_tick = reg['pos'] # avoid double reg if specific pos is not requested if not at_tick: q = "select * from pgq.get_consumer_info(%s, %s)" curs.execute(q, [queue, consumer]) if curs.fetchone(): print 'Consumer already registered' return if at_tick: q = "select * from pgq.register_consumer_at(%s, %s, %s)" curs.execute(q, [queue, consumer, int(at_tick)]) else: q = "select * from pgq.register_consumer(%s, %s)" curs.execute(q, [queue, 
consumer]) print "REGISTER" def cmd_register_subconsumer(self, params): queue = params.get("queue", self.cur_queue) if not queue: print 'No queue specified' return subconsumer = params['subconsumer'] consumer = params.get("consumer") if not consumer: print 'No consumer specified' return curs = self.db.cursor() _subcon_name = '%s.%s' % (consumer, subconsumer) q = "select * from pgq.get_consumer_info(%s, %s)" curs.execute(q, [queue, _subcon_name]) if curs.fetchone(): print 'Subconsumer already registered' return q = "select * from pgq_coop.register_subconsumer(%s, %s, %s)" curs.execute(q, [queue, consumer, subconsumer]) print "REGISTER" def cmd_unregister_consumer(self, params): queue = params.get("queue", self.cur_queue) if not queue: print 'No queue specified' return consumer = params['consumer'] curs = self.db.cursor() if consumer == '*': q = 'select consumer_name from pgq.get_consumer_info(%s)' curs.execute(q, [queue]) consumers = [row['consumer_name'] for row in curs.fetchall()] else: consumers = [consumer] q = "select * from pgq.unregister_consumer(%s, %s)" for consumer in consumers: curs.execute(q, [queue, consumer]) print "UNREGISTER" def cmd_unregister_subconsumer(self, params): queue = params.get("queue", self.cur_queue) if not queue: print 'No queue specified' return subconsumer = params["subconsumer"] consumer = params['consumer'] batch_handling = int(params.get('close') is not None) curs = self.db.cursor() if subconsumer == '*': q = 'select consumer_name from pgq.get_consumer_info(%s)' curs.execute(q, [queue]) subconsumers = [row['consumer_name'].split('.')[1] for row in curs.fetchall() if row['consumer_name'].startswith('%s.' 
% consumer)] else: subconsumers = [subconsumer] q = "select * from pgq_coop.unregister_subconsumer(%s, %s, %s, %s)" for subconsumer in subconsumers: curs.execute(q, [queue, consumer, subconsumer, batch_handling]) print "UNREGISTER" def cmd_create_queue(self, params): curs = self.db.cursor() q = "select * from pgq.get_queue_info(%(queue)s)" curs.execute(q, params) if curs.fetchone(): print "Queue already exists" return q = "select * from pgq.create_queue(%(queue)s)" curs.execute(q, params) print "CREATE" def cmd_drop_queue(self, params): curs = self.db.cursor() q = "select * from pgq.drop_queue(%(queue)s)" curs.execute(q, params) print "DROP" def cmd_alter_queue(self, params): """Alter queue parameters, accepts * for all queues""" queue = params.get('queue') curs = self.db.cursor() if queue == '*': # operate on list of queues q = "select queue_name from pgq.get_queue_info()" curs.execute(q) qlist = [ r[0] for r in curs.fetchall() ] else: # just single queue specified qlist = [ queue ] for qname in qlist: params['queue'] = qname # loop through the parameters, passing any unrecognized # key down pgq.set_queue_config for k in params: if k in ('queue', 'cmd', 'cmd2'): continue q = "select * from pgq.set_queue_config" \ "(%%(queue)s, '%s', %%(%s)s)" % (k, k) curs.execute(q, params) print "ALTER" def cmd_show_help(self, params): print __doc__ def cmd_exit(self, params): sys.exit(0) ## ## Londiste ## def cmd_londiste_missing(self, params): """Show missing objects.""" queue = self.cur_queue curs = self.db.cursor() q = """select * from londiste.local_show_missing(%s)""" curs.execute(q, [queue]) display_result(curs, 'Missing objects on queue "%s":' % (queue)) def cmd_londiste_tables(self, params): """Show local tables.""" queue = self.cur_queue curs = self.db.cursor() q = """select * from londiste.get_table_list(%s) where local""" curs.execute(q, [queue]) display_result(curs, 'Local tables on queue "%s":' % (queue)) def cmd_londiste_seqs(self, params): """Show local seqs.""" 
queue = self.cur_queue curs = self.db.cursor() q = """select * from londiste.get_seq_list(%s) where local""" curs.execute(q, [queue]) display_result(curs, 'Sequences on queue "%s":' % (queue)) def cmd_londiste_add_table(self, params): """Add table.""" args = [] for a in ('skip_truncate', 'expect_sync', 'backup', 'no_triggers', 'skip'): if a in params: args.append(a) for a in ('tgflags', 'ignore', 'pkey', 'when', 'ev_type', 'ev_data', 'ev_extra1', 'ev_extra2', 'ev_extra3', 'ev_extra4'): if a in params: args.append("%s=%s" % (a, params[a])) curs = self.db.cursor() q = """select * from londiste.local_add_table(%s, %s, %s)""" for tbl in params['tables']: curs.execute(q, [self.cur_queue, tbl, args]) res = curs.fetchone() print res[0], res[1] print 'ADD_TABLE' def cmd_londiste_remove_table(self, params): """Remove table.""" curs = self.db.cursor() q = """select * from londiste.local_remove_table(%s, %s)""" for tbl in params['tables']: curs.execute(q, [self.cur_queue, tbl]) res = curs.fetchone() print res[0], res[1] print 'REMOVE_TABLE' def cmd_londiste_add_seq(self, params): """Add seq.""" curs = self.db.cursor() q = """select * from londiste.local_add_seq(%s, %s)""" for seq in params['seqs']: curs.execute(q, [self.cur_queue, seq]) res = curs.fetchone() print res[0], res[1] print 'ADD_SEQ' def cmd_londiste_remove_seq(self, params): """Remove seq.""" curs = self.db.cursor() q = """select * from londiste.local_remove_seq(%s, %s)""" for seq in params['seqs']: curs.execute(q, [self.cur_queue, seq]) res = curs.fetchone() print res[0], res[1] print 'REMOVE_SEQ:', res[0], res[1] ## generic info def cmd_show_table(self, params): print '-' * 64 tbl = params['table'] curs = self.db.cursor() s = skytools.TableStruct(curs, tbl) s.create(fakecurs(), skytools.T_ALL) print '-' * 64 def cmd_show_sequence(self, params): print '-' * 64 seq = params['seq'] curs = self.db.cursor() s = skytools.SeqStruct(curs, seq) s.create(fakecurs(), skytools.T_ALL) print '-' * 64 ## sql pass-through def 
execute_sql(self, params): tks = [tk[1] for tk in self.tokens] sql = ' '.join(tks) curs = self.db.cursor() curs.execute(sql) if curs.description: display_result(curs, None) print curs.statusmessage class fakecurs: def execute(self, sql): print sql def main(): global script script = AdminConsole() script.run(sys.argv[1:]) if __name__ == '__main__': main() skytools-3.2.6/python/pkgloader.py0000644000000000000000000000462512426435645014164 0ustar """Loader for Skytools modules. Primary idea is to allow several major versions to co-exists. Secondary idea - allow checking minimal minor version. """ import sys, os, os.path, re __all__ = ['require'] _top = os.path.dirname(os.path.abspath(os.path.normpath(__file__))) _pkg_cache = None _import_cache = {} _pat = re.compile('^([a-z]+)-([0-9]+).([0-9]+)$') def _load_pkg_cache(): global _pkg_cache if _pkg_cache is not None: return _pkg_cache _pkg_cache = {} for dir in os.listdir(_top): m = _pat.match(dir) if not m: continue modname = m.group(1) modver = (int(m.group(2)), int(m.group(3))) _pkg_cache.setdefault(modname, []).append((modver, dir)) for vlist in _pkg_cache.itervalues(): vlist.sort(reverse = True) return _pkg_cache def _install_path(pkg, newpath): for p in sys.path: pname = os.path.basename(p) m = _pat.match(pname) if m and m.group(1) == pkg: sys.path.remove(p) sys.path.insert(0, newpath) def require(pkg, reqver): # parse arg reqval = tuple([int(n) for n in reqver.split('.')]) need = reqval[:2] # cut minor ver # check if we already have one installed if pkg in _import_cache: got = _import_cache[pkg] if need[0] != got[0] or reqval > got: raise ImportError("Request for package '%s' ver '%s', have '%s'" % ( pkg, reqver, '.'.join(got))) return # pick best ver from available ones cache = _load_pkg_cache() if pkg not in cache: return for pkgver, pkgdir in cache[pkg]: if pkgver[0] == need[0] and pkgver >= need: # install the best on _install_path(pkg, os.path.join(_top, pkgdir)) break inst_ver = reqval # now import whatever 
is available mod = __import__(pkg) # check if it is actually useful ver_str = mod.__version__ for i, c in enumerate(ver_str): if c != '.' and not c.isdigit(): ver_str = ver_str[:i] break full_ver = tuple([int(x) for x in ver_str.split('.')]) if full_ver[0] != reqval[0] or reqval > full_ver: raise ImportError("Request for package '%s' ver '%s', have '%s'" % ( pkg, reqver, '.'.join(full_ver))) inst_ver = full_ver # remember full version _import_cache[pkg] = inst_ver return mod skytools-3.2.6/python/walmgr.py0000755000000000000000000030156112426435645013507 0ustar #! /usr/bin/env python """WALShipping manager. walmgr INI COMMAND [-n] Master commands: setup Configure PostgreSQL for WAL archiving sync Copies in-progress WALs to slave syncdaemon Daemon mode for regular syncing stop Stop archiving - de-configure PostgreSQL periodic Run periodic command if configured. synch-standby Manage synchronous streaming replication. Slave commands: boot Stop playback, accept queries pause Just wait, don't play WAL-s continue Start playing WAL-s again createslave Create streaming replication slave Common commands: init Create configuration files, set up ssh keys. listbackups List backups. backup Copies all master data to slave. Will keep backup history if slave keep_backups is set. EXPERIMENTAL: If run on slave, creates backup from in-recovery slave data. restore [set][dst] Stop postmaster, move new data dir to right location and start postmaster in playback mode. Optionally use [set] as the backupset name to restore. In this case the directory is copied, not moved. cleanup Cleanup any walmgr files after stop. Internal commands: xarchive archive one WAL file (master) xrestore restore one WAL file (slave) xlock Obtain backup lock (master) xrelease Release backup lock (master) xrotate Rotate backup sets, expire and archive oldest if necessary. 
class WalChunk:
    """A piece of a WAL segment, as used by record-based shipping."""

    def __init__(self, filename, pos=0, bytes=0):
        # location of the chunk inside the segment
        self.filename = filename
        self.pos = pos
        self.bytes = bytes
        # bookkeeping for sync statistics
        self.start_time = time.time()
        self.sync_count, self.sync_time = 0, 0.0

    def __str__(self):
        return "%s @ %d +%d" % (self.filename, self.pos, self.bytes)
class BackupLabel:
    """Parsed contents of a PostgreSQL backup_label file."""

    def __init__(self, backupdir):
        """Initialize a new BackupLabel from an existing file.

        A missing file, or missing lines in it, simply leave the
        corresponding attributes as None.
        """
        filename = os.path.join(backupdir, "backup_label")
        self.first_wal = None       # first WAL segment needed by the backup
        self.start_time = None      # backup start timestamp (raw string)
        self.label_string = None    # label passed to pg_start_backup()
        if not os.path.exists(filename):
            return
        # raw strings for the regexps (plain '\s'/'\(' escapes are deprecated
        # in py3) and deterministic close of the file handle
        f = open(filename)
        try:
            for line in f:
                m = re.match(r'^START WAL LOCATION: [^\s]+ \(file ([0-9A-Z]+)\)$', line)
                if m:
                    self.first_wal = m.group(1)
                m = re.match(r'^START TIME:\s(.*)$', line)
                if m:
                    self.start_time = m.group(1)
                m = re.match(r'^LABEL: (.*)$', line)
                if m:
                    self.label_string = m.group(1)
        finally:
            f.close()
"""Parses pgpass line, returns dict""" try: (host, port, db, user, pwd) = pgline.rstrip('\n\r').split(":") return {'host': host, 'port': port, 'db': db, 'user': user, 'pwd': pwd} except ValueError: return None def ensure_user(self, host, port, user, pwd): """Ensure that line for streaming replication exists in .pgpass""" self.remove_user(host, port, user) self.contents.insert(0, '%s:%s:%s:%s:%s\n' % (host, port, 'replication', user, pwd)) def remove_user(self, host, port, user): """Remove all matching lines from .pgpass""" new_contents = [] found = False for l in self.contents: p = self.split_pgpass_line(l) if p and p['host'] == host and p['port'] == port and p['user'] == user and p['db'] == 'replication': found = True continue new_contents.append(l) self.contents = new_contents return found def write(self): """Write contents back to file""" f = open(self.passfile,'w') os.chmod(self.passfile, 0600) f.writelines(self.contents) f.close() class PostgresConfiguration: """Postgres configuration manipulation""" def __init__(self, walmgr, cf_file): """load the configuration from master_config""" self.walmgr = walmgr self.log = walmgr.log self.cf_file = cf_file self.cf_buf = open(self.cf_file, "r").read() def archive_mode(self): """Return value for specified parameter""" # see if explicitly set m = re.search("^\s*archive_mode\s*=\s*'?([a-zA-Z01]+)'?\s*#?.*$", self.cf_buf, re.M | re.I) if m: return m.group(1) # also, it could be commented out as initdb leaves it # it'd probably be best to check from the database ... m = re.search("^#archive_mode\s*=.*$", self.cf_buf, re.M | re.I) if m: return "off" return None def synchronous_standby_names(self): """Return value for specified parameter""" # see if explicitly set m = re.search("^\s*synchronous_standby_names\s*=\s*'([^']*)'\s*#?.*$", self.cf_buf, re.M | re.I) if m: return m.group(1) # also, it could be commented out as initdb leaves it # it'd probably be best to check from the database ... 
m = re.search("^#synchronous_standby_names\s*=.*$", self.cf_buf, re.M | re.I) if m: return '' return None def wal_level(self): """Return value for specified parameter""" # see if explicitly set m = re.search("^\s*wal_level\s*=\s*'?([a-z_]+)'?\s*#?.*$", self.cf_buf, re.M | re.I) if m: return m.group(1) # also, it could be commented out as initdb leaves it # it'd probably be best to check from the database ... m = re.search("^#wal_level\s*=.*$", self.cf_buf, re.M | re.I) if m: return "minimal" return None def modify(self, cf_params): """Change the configuration parameters supplied in cf_params""" for (param, value) in cf_params.iteritems(): r_active = re.compile("^\s*%s\s*=\s*([^\s#]*).*$" % param, re.M) r_disabled = re.compile("^\s*#\s*%s\s*=.*$" % param, re.M) cf_full = "%s = '%s'" % (param, value) m = r_active.search(self.cf_buf) if m: old_val = m.group(1) self.log.debug("found parameter %s with value %r", param, old_val) self.cf_buf = "%s%s%s" % (self.cf_buf[:m.start()], cf_full, self.cf_buf[m.end():]) else: m = r_disabled.search(self.cf_buf) if m: self.log.debug("found disabled parameter %s", param) self.cf_buf = "%s\n%s%s" % (self.cf_buf[:m.end()], cf_full, self.cf_buf[m.end():]) else: # not found, append to the end self.log.debug("found no value") self.cf_buf = "%s\n%s\n\n" % (self.cf_buf, cf_full) def write(self): """Write the configuration back to file""" cf_old = self.cf_file + ".old" cf_new = self.cf_file + ".new" if self.walmgr.not_really: cf_new = "/tmp/postgresql.conf.new" open(cf_new, "w").write(self.cf_buf) self.log.info("Showing diff") os.system("diff -u %s %s" % (self.cf_file, cf_new)) self.log.info("Done diff") os.remove(cf_new) return # polite method does not work, as usually not enough perms for it open(self.cf_file, "w").write(self.cf_buf) def set_synchronous_standby_names(self,param_value): """Helper function to change synchronous_standby_names and signal postmaster""" self.log.info("Changing synchronous_standby_names from %r to %r", 
self.synchronous_standby_names(), param_value) cf_params = dict() cf_params['synchronous_standby_names'] = param_value self.modify(cf_params) self.write() data_dir=self.walmgr.cf.getfile("master_data") self.log.info("Sending SIGHUP to postmaster") self.walmgr.signal_postmaster(data_dir, signal.SIGHUP) class WalMgr(skytools.DBScript): def init_optparse(self, parser=None): p = skytools.DBScript.init_optparse(self, parser) p.set_usage(__doc__.strip()) p.add_option("-n", "--not-really", action="store_true", dest="not_really", help = "Don't actually do anything.", default=False) p.add_option("", "--init-master", action="store_true", dest="init_master", help = "Initialize master walmgr.", default=False) p.add_option("", "--slave", action="store", type="string", dest="slave", help = "Slave host name.", default="") p.add_option("", "--pgdata", action="store", type="string", dest="pgdata", help = "Postgres data directory.", default="") p.add_option("", "--config-dir", action="store", type="string", dest="config_dir", help = "Configuration file location for --init-X commands.", default="~/conf") p.add_option("", "--ssh-keygen", action="store_true", dest="ssh_keygen", help = "master: generate SSH key pair if needed", default=False) p.add_option("", "--ssh-add-key", action="store", dest="ssh_add_key", help = "slave: add public key to authorized_hosts", default=False) p.add_option("", "--ssh-remove-key", action="store", dest="ssh_remove_key", help = "slave: remove master key from authorized_hosts", default=False) p.add_option("", "--add-password", action="store", dest="add_password", help = "slave: add password from file to .pgpass. 
Additional fields will be extracted from primary-conninfo", default=False) p.add_option("", "--remove-password", action="store_true", dest="remove_password", help = "slave: remove previously added line from .pgpass", default=False) p.add_option("", "--primary-conninfo", action="store", dest="primary_conninfo", default=None, help = "slave: connect string for streaming replication master") p.add_option("", "--init-slave", action="store_true", dest="init_slave", help = "Initialize slave walmgr.", default=False) p.add_option("", "--synch-standby", action="store", dest="synchronous_standby_names", default=None, help = "master: do the same thing as command synch-standby, but do not use INI file") return p def load_config(self): """override config load to allow operation without a config file""" if len(self.args) < 1: # no config file, generate default # guess the job name from cmdline options if self.options.init_master: job_name = 'wal-master' elif self.options.init_slave: job_name = 'wal-slave' else: job_name = 'walmgr' # common config settings opt_dict = { 'use_skylog': '0', 'job_name': job_name, } # master configuration settings master_opt_dict = { 'master_db': 'dbname=template1', 'completed_wals': '%%(slave)s:%%(walmgr_data)s/logs.complete', 'partial_wals': '%%(slave)s:%%(walmgr_data)s/logs.partial', 'full_backup': '%%(slave)s:%%(walmgr_data)s/data.master', 'config_backup': '%%(slave)s:%%(walmgr_data)s/config.backup', 'keep_symlinks': '1', 'compression': '0', 'walmgr_data': '~/walshipping', 'logfile': '~/log/%(job_name)s.log', 'pidfile': '~/pid/%(job_name)s.pid', 'use_skylog': '1', } # slave configuration settings slave_opt_dict = { 'completed_wals': '%%(walmgr_data)s/logs.complete', 'partial_wals': '%%(walmgr_data)s/logs.partial', 'full_backup': '%%(walmgr_data)s/data.master', 'config_backup': '%%(walmgr_data)s/config.backup', 'walmgr_data': '~/walshipping', 'logfile': '~/log/%(job_name)s.log', 'pidfile': '~/pid/%(job_name)s.pid', 'use_skylog': '1', } if 
self.options.init_master: opt_dict.update(master_opt_dict) elif self.options.init_slave: opt_dict.update(slave_opt_dict) self.is_master = self.options.init_master config = skytools.Config(self.service_name, None, user_defs = opt_dict, override = self.cf_override) else: # default to regular config handling config = skytools.DBScript.load_config(self) self.is_master = config.has_option('master_data') # create the log and pid files if needed for cfk in [ "logfile", "pidfile" ]: if config.has_option(cfk): dirname = os.path.dirname(config.getfile(cfk)) if not os.path.isdir(dirname): os.makedirs(dirname) return config def __init__(self, args): skytools.DBScript.__init__(self, 'walmgr', args) self.set_single_loop(1) self.not_really = self.options.not_really self.pg_backup = 0 self.walchunk = None self.script = os.path.abspath(sys.argv[0]) if len(self.args) > 1: # normal operations, cfgfile and command self.cfgfile = self.args[0] self.cmd = self.args[1] self.args = self.args[2:] else: if self.options.init_master: self.cmd = 'init_master' elif self.options.init_slave: self.cmd = 'init_slave' elif self.options.synchronous_standby_names is not None: self.cmd = "synch-standby" else: usage(1) self.cfgfile = None self.args = [] if self.cmd not in ('sync', 'syncdaemon'): # don't let pidfile interfere with normal operations, but # disallow concurrent syncing self.pidfile = None cmdtab = { 'init_master': self.walmgr_init_master, 'init_slave': self.walmgr_init_slave, 'setup': self.walmgr_setup, 'stop': self.master_stop, 'backup': self.run_backup, 'listbackups': self.list_backups, 'restore': self.restore_database, 'periodic': self.master_periodic, 'sync': self.master_sync, 'syncdaemon': self.master_syncdaemon, 'pause': self.slave_pause, 'continue': self.slave_continue, 'boot': self.slave_boot, 'createslave': self.slave_createslave, 'cleanup': self.walmgr_cleanup, 'synch-standby': self.master_synch_standby, 'xlock': self.slave_lock_backups_exit, 'xrelease': self.slave_resume_backups, 
'xrotate': self.slave_rotate_backups, 'xpurgewals': self.slave_purge_wals, 'xarchive': self.master_xarchive, 'xrestore': self.xrestore, 'xpartialsync': self.slave_append_partial, } if not cmdtab.has_key(self.cmd): usage(1) self.work = cmdtab[self.cmd] def assert_is_master(self, master_required): if self.is_master != master_required: self.log.warning("Action not available on current node.") sys.exit(1) def pg_start_backup(self, code): q = "select pg_start_backup('FullBackup')" self.log.info("Execute SQL: %s; [%s]", q, self.cf.get("master_db")) if self.not_really: self.pg_backup = 1 return db = self.get_database("master_db") db.cursor().execute(q) db.commit() self.close_database("master_db") self.pg_backup = 1 def pg_stop_backup(self): if not self.pg_backup: return q = "select pg_stop_backup()" self.log.info("Execute SQL: %s; [%s]", q, self.cf.get("master_db")) if self.not_really: return db = self.get_database("master_db") db.cursor().execute(q) db.commit() self.close_database("master_db") def signal_postmaster(self, data_dir, sgn): pidfile = os.path.join(data_dir, "postmaster.pid") if not os.path.isfile(pidfile): self.log.info("postmaster is not running (pidfile not present)") return False buf = open(pidfile, "r").readline() pid = int(buf.strip()) self.log.debug("Signal %d to process %d", sgn, pid) if sgn == 0 or not self.not_really: try: os.kill(pid, sgn) except OSError, ex: if ex.errno == errno.ESRCH: self.log.info("postmaster is not running (no process at indicated PID)") return False else: raise return True def exec_rsync(self,args,die_on_error=False): cmdline = [ "rsync", "-a", "--quiet" ] if self.cf.getint("compression", 0) > 0: cmdline.append("-z") cmdline += args cmd = "' '".join(cmdline) self.log.debug("Execute rsync cmd: %r", cmd) if self.not_really: return 0 res = os.spawnvp(os.P_WAIT, cmdline[0], cmdline) if res == 24: self.log.info("Some files vanished, but thats OK") res = 0 elif res != 0: self.log.fatal("rsync exec failed, res=%d", res) if 
die_on_error: sys.exit(1) return res def exec_big_rsync(self, args): if self.exec_rsync(args) != 0: self.log.fatal("Big rsync failed") self.pg_stop_backup() sys.exit(1) def rsync_log_directory(self, source_dir, dst_loc): """rsync a pg_log or pg_xlog directory - ignore most of the directory contents, and pay attention to symlinks """ keep_symlinks = self.cf.getint("keep_symlinks", 1) subdir = os.path.basename(source_dir) if not os.path.exists(source_dir): self.log.info("%s does not exist, skipping", subdir) return cmdline = [] # if this is a symlink, copy it's target first if os.path.islink(source_dir) and keep_symlinks: self.log.info('%s is a symlink, attempting to create link target', subdir) # expand the link link = os.readlink(source_dir) if not link.startswith("/"): link = os.path.join(os.getcwd(), link) link_target = os.path.join(link, "") slave_host = self.cf.get("slave") remote_target = "%s:%s" % (slave_host, link_target) options = [ "--include=archive_status", "--exclude=/**" ] if self.exec_rsync( options + [ link_target, remote_target ]): # unable to create the link target, just convert the links # to directories in PGDATA self.log.warning('Unable to create symlinked %s on target, copying', subdir) cmdline += [ "--copy-unsafe-links" ] cmdline += [ "--exclude=pg_log/*" ] cmdline += [ "--exclude=pg_xlog/archive_status/*" ] cmdline += [ "--include=pg_xlog/archive_status" ] cmdline += [ "--exclude=pg_xlog/*" ] self.exec_big_rsync(cmdline + [ source_dir, dst_loc ]) def exec_cmd(self, cmdline, allow_error=False): cmd = "' '".join(cmdline) self.log.debug("Execute cmd: %r", cmd) if self.not_really: return process = subprocess.Popen(cmdline,stdout=subprocess.PIPE) output = process.communicate() res = process.returncode if res != 0 and not allow_error: self.log.fatal("exec failed, res=%d (%r)", res, cmdline) sys.exit(1) return (res,output[0]) def exec_system(self, cmdline): self.log.debug("Execute cmd: %r", cmdline) if self.not_really: return 0 return 
os.WEXITSTATUS(os.system(cmdline)) def chdir(self, loc): self.log.debug("chdir: %r", loc) if self.not_really: return try: os.chdir(loc) except os.error: self.log.fatal("CHDir failed") self.pg_stop_backup() sys.exit(1) def parse_conninfo(self,conninfo): """Extract host,user and port from primary-conninfo""" m = re.match("^.*\s*host\s*=\s*([^\s]+)\s*.*$", conninfo) if m: host = m.group(1) else: host = 'localhost' m = re.match("^.*\s*user\s*=\s*([^\s]+)\s*.*$", conninfo) if m: user = m.group(1) else: user = os.environ['USER'] m = re.match("^.*\s*port\s*=\s*([^\s]+)\s*.*$", conninfo) if m: port = m.group(1) else: port = '5432' m = re.match("^.*\s*sslmode\s*=\s*([^\s]+)\s*.*$", conninfo) if m: sslmode = m.group(1) else: sslmode = None return host,port,user,sslmode def get_last_complete(self): """Get the name of last xarchived segment.""" data_dir = self.cf.getfile("master_data") fn = os.path.join(data_dir, ".walshipping.last") try: last = open(fn, "r").read().strip() return last except: self.log.info("Failed to read %s", fn) return None def set_last_complete(self, last): """Set the name of last xarchived segment.""" data_dir = self.cf.getfile("master_data") fn = os.path.join(data_dir, ".walshipping.last") fn_tmp = fn + ".new" try: f = open(fn_tmp, "w") f.write(last) f.close() os.rename(fn_tmp, fn) except: self.log.fatal("Cannot write to %s", fn) def master_stop(self): """Deconfigure archiving, attempt to stop syncdaemon""" data_dir = self.cf.getfile("master_data") restart_cmd = self.cf.getfile("master_restart_cmd", "") self.assert_is_master(True) self.log.info("Disabling WAL archiving") self.master_configure_archiving(False, restart_cmd) # if we have a restart command, then use it, otherwise signal if restart_cmd: self.log.info("Restarting postmaster") self.exec_system(restart_cmd) else: self.log.info("Sending SIGHUP to postmaster") self.signal_postmaster(data_dir, signal.SIGHUP) # stop any running syncdaemons pidfile = self.cf.getfile("pidfile", "") if 
os.path.exists(pidfile): self.log.info('Pidfile %s exists, attempting to stop syncdaemon.', pidfile) self.exec_cmd([self.script, self.cfgfile, "syncdaemon", "-s"]) self.log.info("Done") def walmgr_cleanup(self): """ Clean up any walmgr files on slave and master. """ if not self.is_master: # remove walshipping directory dirname = self.cf.getfile("walmgr_data") self.log.info("Removing walmgr data directory: %s", dirname) if not self.not_really: shutil.rmtree(dirname) # remove backup 8.3/main.X directories backups = glob.glob(self.cf.getfile("slave_data") + ".[0-9]") for dirname in backups: self.log.info("Removing backup main directory: %s", dirname) if not self.not_really: shutil.rmtree(dirname) ssh_dir = os.path.expanduser("~/.ssh") auth_file = os.path.join(ssh_dir, "authorized_keys") if self.options.ssh_remove_key and os.path.isfile(auth_file): # remove master key from ssh authorized keys, simple substring match should do keys = "" for key in open(auth_file): if not self.options.ssh_remove_key in key: keys += key else: self.log.info("Removed %s from %s", self.options.ssh_remove_key, auth_file) self.log.info("Overwriting authorized_keys file") if not self.not_really: tmpfile = auth_file + ".walmgr.tmp" f = open(tmpfile, "w") f.write(keys) f.close() os.rename(tmpfile, auth_file) else: self.log.debug("authorized_keys:\n%s", keys) # remove password from .pgpass primary_conninfo = self.cf.get("primary_conninfo", "") if self.options.remove_password and primary_conninfo and not self.not_really: pg = Pgpass('~/.pgpass') host, port, user, _ = self.parse_conninfo(primary_conninfo) if pg.remove_user(host, port, user): self.log.info("Removing line from .pgpass") pg.write() # get rid of the configuration file, both master and slave self.log.info("Removing config file: %s", self.cfgfile) if not self.not_really: os.remove(self.cfgfile) def master_synch_standby(self): """Manage synchronous_standby_names parameter""" if self.options.synchronous_standby_names is None: if 
len(self.args) < 1: die(1, "usage: synch-standby SYNCHRONOUS_STANDBY_NAMES") names = self.args[0] self.assert_is_master(True) else: # as synchronous_standby_names is available since 9.1 # we can override DEFAULT_PG_VERSION global DEFAULT_PG_VERSION DEFAULT_PG_VERSION = "9.1" self.guess_locations() self.override_cf_option('master_config', self.postgres_conf) self.override_cf_option('master_data', self.pgdata) self.override_cf_option('master_db', 'dbname=template1') names = self.options.synchronous_standby_names cf = PostgresConfiguration(self, self.cf.getfile("master_config")) # list of slaves db = self.get_database("master_db") cur = db.cursor() cur.execute("select application_name from pg_stat_replication") slave_names = [slave[0] for slave in cur.fetchall()] self.close_database("master_db") if names.strip() == "": if not self.not_really: cf.set_synchronous_standby_names("") return if names.strip() == "*": if slave_names: if not self.not_really: cf.set_synchronous_standby_names(names) return else: die(1,"At least one slave must be available when enabling synchronous mode") # ensure that at least one slave is available from new parameter value slave_found = None for new_synch_slave in re.findall(r"[^\s,]+",names): if new_synch_slave not in slave_names: self.log.warning("No slave available with name %s", new_synch_slave) else: slave_found = True break if not slave_found: die(1,"At least one slave must be available from new list when enabling synchronous mode") elif not self.not_really: cf.set_synchronous_standby_names(names) def master_configure_archiving(self, enable_archiving, can_restart): """Turn the archiving on or off""" cf = PostgresConfiguration(self, self.cf.getfile("master_config")) curr_archive_mode = cf.archive_mode() curr_wal_level = cf.wal_level() need_restart_warning = False if enable_archiving: # enable archiving cf_file = os.path.abspath(self.cf.filename) xarchive = "%s %s %s" % (self.script, cf_file, "xarchive %p %f") cf_params = { 
"archive_command": xarchive } if curr_archive_mode is not None: # archive mode specified in config, turn it on self.log.debug("found 'archive_mode' in config -- enabling it") cf_params["archive_mode"] = "on" if curr_archive_mode.lower() not in ('1', 'on', 'true') and not can_restart: need_restart_warning = True if curr_wal_level is not None and curr_wal_level != 'hot_standby': # wal level set in config, enable it wal_level = self.cf.getboolean("hot_standby", False) and "hot_standby" or "archive" self.log.debug("found 'wal_level' in config -- setting to '%s'", wal_level) cf_params["wal_level"] = wal_level if curr_wal_level not in ("archive", "hot_standby") and not can_restart: need_restart_warning = True if need_restart_warning: self.log.warning("database must be restarted to enable archiving") else: # disable archiving cf_params = dict() if can_restart: # can restart, disable archive mode and set wal_level to minimal cf_params['archive_command'] = '' if curr_archive_mode: cf_params['archive_mode'] = 'off' if curr_wal_level: cf_params['wal_level'] = 'minimal' cf_params['max_wal_senders'] = '0' else: # not possible to change archive_mode or wal_level (requires restart), # so we just set the archive_command to /bin/true to avoid WAL pileup. self.log.info("database must be restarted to disable archiving") self.log.info("Setting archive_command to /bin/true to avoid WAL pileup") cf_params['archive_command'] = '/bin/true' # disable synchronous standbys, note that presently we don't care # if there is more than one standby. if cf.synchronous_standby_names(): cf_params['synchronous_standby_names'] = '' self.log.debug("modifying configuration: %s", cf_params) cf.modify(cf_params) cf.write() def slave_deconfigure_archiving(self, cf_file): """Disable archiving for the slave. This is done by setting archive_command to a trivial command, so that archiving can be re-enabled without restarting postgres. 
Needed when slave is booted with postgresql.conf from master.""" self.log.debug("Disable archiving in %s", cf_file) cf = PostgresConfiguration(self, cf_file) cf_params = { "archive_command": "/bin/true" } self.log.debug("modifying configuration: %s", cf_params) cf.modify(cf_params) cf.write() def remote_mkdir(self, remdir): tmp = remdir.split(":", 1) if len(tmp) < 1: raise Exception("cannot find pathname") elif len(tmp) < 2: self.exec_cmd([ "mkdir", "-p", tmp[0] ]) else: host, path = tmp cmdline = ["ssh", "-nT", host, "mkdir", "-p", path] self.exec_cmd(cmdline) def remote_walmgr(self, command, stdin_disabled = True, allow_error=False): """Pass a command to slave WalManager""" sshopt = "-T" if stdin_disabled: sshopt += "n" slave_config = self.cf.getfile("slave_config") if not slave_config: raise Exception("slave_config not specified in %s" % self.cfgfile) slave_host = self.cf.get("slave") cmdline = [ "ssh", sshopt, "-o", "Batchmode=yes", "-o", "StrictHostKeyChecking=no", slave_host, self.script, slave_config, command ] if self.not_really: cmdline += ["--not-really"] return self.exec_cmd(cmdline, allow_error) def remote_xlock(self): """ Obtain the backup lock to ensure that several backups are not run in parralel. If someone already has the lock we check if this is from a previous (failed) backup. If that is the case, the lock is released and re-obtained. """ xlock_cmd = "xlock %d" % os.getpid() ret = self.remote_walmgr(xlock_cmd, allow_error=True) if ret[0] != 0: # lock failed. 
try: lock_pid = int(ret[1]) except ValueError: self.log.fatal("Invalid pid in backup lock") sys.exit(1) try: os.kill(lock_pid, 0) self.log.fatal("Backup lock already taken") sys.exit(1) except OSError: # no process, carry on self.remote_walmgr("xrelease") self.remote_walmgr(xlock_cmd) def override_cf_option(self, option, value): """Set a configuration option, if it is unset""" if not self.cf.has_option(option): self.cf.cf.set('walmgr', option, value) def guess_locations(self): """ Guess PGDATA and configuration file locations. """ # find the PGDATA directory if self.options.pgdata: self.pgdata = self.options.pgdata elif 'PGDATA' in os.environ: self.pgdata = os.environ['PGDATA'] else: self.pgdata = "~/%s/main" % DEFAULT_PG_VERSION self.pgdata = os.path.expanduser(self.pgdata) if not os.path.isdir(self.pgdata): die(1, 'Postgres data directory not found: %s' % self.pgdata) postmaster_opts = os.path.join(self.pgdata, 'postmaster.opts') self.postgres_bin = "" self.postgres_conf = "" if os.path.exists(postmaster_opts): # postmaster_opts exists, attempt to guess various paths # get unquoted args from opts file cmdline = [ k.strip('"') for k in open(postmaster_opts).read().split() ] if cmdline: self.postgres_bin = os.path.dirname(cmdline[0]) cmdline = cmdline[1:] for item in cmdline: if item.startswith("config_file="): self.postgres_conf = item.split("=")[1] if not self.postgres_conf: self.postgres_conf = os.path.join(self.pgdata, "postgresql.conf") else: # no postmaster opts, resort to guessing self.log.info('postmaster.opts not found, resorting to guesses') # use the directory of first postgres executable from path for path in os.environ['PATH'].split(os.pathsep): path = os.path.expanduser(path) exe = os.path.join(path, "postgres") if os.path.isfile(exe): self.postgres_bin = path break else: # not found, use Debian default self.postgres_bin = "/usr/lib/postgresql/%s/bin" % DEFAULT_PG_VERSION if os.path.exists(self.pgdata): self.postgres_conf = os.path.join(self.pgdata, 
"postgresql.conf") else: self.postgres_conf = "/etc/postgresql/%s/main/postgresql.conf" % DEFAULT_PG_VERSION if not os.path.isdir(self.postgres_bin): die(1, "Postgres bin directory not found.") if not os.path.isfile(self.postgres_conf): if not self.options.init_slave: # postgres_conf is required for master die(1, "Configuration file not found: %s" % self.postgres_conf) # Attempt to guess the init.d script name script_suffixes = [ "9.1", "9.0", "8.4", "8.3", "8.2", "8.1", "8.0" ] self.initd_script = "/etc/init.d/postgresql" if not os.path.exists(self.initd_script): for suffix in script_suffixes: try_file = "%s-%s" % (self.initd_script, suffix) if os.path.exists(try_file): self.initd_script = try_file break else: self.initd_script = "%s -m fast -D %s" % \ (os.path.join(self.postgres_bin, "pg_ctl"), os.path.abspath(self.pgdata)) def write_walmgr_config(self, config_data): cf_name = os.path.join(os.path.expanduser(self.options.config_dir), self.cf.get("job_name") + ".ini") dirname = os.path.dirname(cf_name) if not os.path.isdir(dirname): self.log.info('Creating config directory: %s', dirname) os.makedirs(dirname) self.log.info('Writing configuration file: %s', cf_name) self.log.debug("config data:\n%s", config_data) if not self.not_really: cf = open(cf_name, "w") cf.write(config_data) cf.close() def walmgr_init_master(self): """ Initialize configuration file, generate SSH key pair if needed. 
""" self.guess_locations() if not self.options.slave: die(1, 'Specify slave host name with "--slave" option.') self.override_cf_option('master_bin', self.postgres_bin) self.override_cf_option('master_config', self.postgres_conf) self.override_cf_option('master_data', self.pgdata) # assume that slave config is in the same location as master's # can override with --set slave_config= slave_walmgr_dir = os.path.abspath(os.path.expanduser(self.options.config_dir)) self.override_cf_option('slave_config', os.path.join(slave_walmgr_dir, "wal-slave.ini")) master_config = """[walmgr] job_name = %(job_name)s logfile = %(logfile)s pidfile = %(pidfile)s use_skylog = 1 master_db = %(master_db)s master_data = %(master_data)s master_config = %(master_config)s master_bin = %(master_bin)s slave = %(slave)s slave_config = %(slave_config)s walmgr_data = %(walmgr_data)s completed_wals = %(completed_wals)s partial_wals = %(partial_wals)s full_backup = %(full_backup)s config_backup = %(config_backup)s keep_symlinks = %(keep_symlinks)s compression = %(compression)s """ try: opt_dict = dict([(k, self.cf.get(k)) for k in self.cf.options()]) opt_dict['slave'] = self.options.slave master_config = master_config % opt_dict except KeyError, e: die(1, 'Required setting missing: %s' % e) self.write_walmgr_config(master_config) # generate SSH key pair if requested if self.options.ssh_keygen: keyfile = os.path.expanduser("~/.ssh/id_dsa") if os.path.isfile(keyfile): self.log.info("SSH key %s already exists, skipping", keyfile) else: self.log.info("Generating ssh key: %s", keyfile) cmdline = ["ssh-keygen", "-t", "dsa", "-N", "", "-q", "-f", keyfile ] self.log.debug(' '.join(cmdline)) if not self.not_really: subprocess.call(cmdline) key = open(keyfile + ".pub").read().strip() self.log.info("public key: %s", key) def walmgr_init_slave(self): """ Initialize configuration file, move SSH pubkey into place. 
""" self.guess_locations() self.override_cf_option('slave_bin', self.postgres_bin) self.override_cf_option('slave_data', self.pgdata) self.override_cf_option('slave_config_dir', os.path.dirname(self.postgres_conf)) if self.initd_script: self.override_cf_option('slave_start_cmd', "%s start" % self.initd_script) self.override_cf_option('slave_stop_cmd', "%s stop" % self.initd_script) slave_config = """[walmgr] job_name = %(job_name)s logfile = %(logfile)s use_skylog = %(use_skylog)s slave_data = %(slave_data)s slave_bin = %(slave_bin)s slave_stop_cmd = %(slave_stop_cmd)s slave_start_cmd = %(slave_start_cmd)s slave_config_dir = %(slave_config_dir)s walmgr_data = %(walmgr_data)s completed_wals = %(completed_wals)s partial_wals = %(partial_wals)s full_backup = %(full_backup)s config_backup = %(config_backup)s """ if self.options.primary_conninfo: self.override_cf_option('primary_conninfo', self.options.primary_conninfo) slave_config += """ primary_conninfo = %(primary_conninfo)s """ try: opt_dict = dict([(k, self.cf.get(k)) for k in self.cf.options()]) slave_config = slave_config % opt_dict except KeyError, e: die(1, 'Required setting missing: %s' % e) self.write_walmgr_config(slave_config) if self.options.ssh_add_key: # add the named public key to authorized hosts ssh_dir = os.path.expanduser("~/.ssh") auth_file = os.path.join(ssh_dir, "authorized_keys") if not os.path.isdir(ssh_dir): self.log.info("Creating directory: %s", ssh_dir) if not self.not_really: os.mkdir(ssh_dir) self.log.debug("Reading public key from %s", self.options.ssh_add_key) master_pubkey = open(self.options.ssh_add_key).read() key_present = False if os.path.isfile(auth_file): for key in open(auth_file): if key == master_pubkey: self.log.info("Key already present in %s, skipping", auth_file) key_present = True if not key_present: self.log.info("Adding %s to %s", self.options.ssh_add_key, auth_file) if not self.not_really: af = open(auth_file, "a") af.write(master_pubkey) af.close() if 
self.options.add_password and self.options.primary_conninfo: # add password to pgpass self.log.debug("Reading password from file %s", self.options.add_password) pwd = open(self.options.add_password).readline().rstrip('\n\r') pg = Pgpass('~/.pgpass') host, port, user, _ = self.parse_conninfo(self.options.primary_conninfo) pg.ensure_user(host, port, user, pwd) pg.write() self.log.info("Added password from %s to .pgpass", self.options.add_password) def walmgr_setup(self): if self.is_master: self.log.info("Configuring WAL archiving") data_dir = self.cf.getfile("master_data") restart_cmd = self.cf.getfile("master_restart_cmd", "") self.master_configure_archiving(True, restart_cmd) # if we have a restart command, then use it, otherwise signal if restart_cmd: self.log.info("Restarting postmaster") self.exec_system(restart_cmd) else: self.log.info("Sending SIGHUP to postmaster") self.signal_postmaster(data_dir, signal.SIGHUP) # ask slave to init self.remote_walmgr("setup") self.log.info("Done") else: # create slave directory structure def mkdirs(dir): if not os.path.exists(dir): self.log.debug("Creating directory %s", dir) if not self.not_really: os.makedirs(dir) mkdirs(self.cf.getfile("completed_wals")) mkdirs(self.cf.getfile("partial_wals")) mkdirs(self.cf.getfile("full_backup")) cf_backup = self.cf.getfile("config_backup", "") if cf_backup: mkdirs(cf_backup) def master_periodic(self): """ Run periodic command on master node. 
We keep time using .walshipping.last file, so this has to be run before set_last_complete() """ self.assert_is_master(True) try: command_interval = self.cf.getint("command_interval", 0) periodic_command = self.cf.get("periodic_command", "") if periodic_command: check_file = os.path.join(self.cf.getfile("master_data"), ".walshipping.periodic") elapsed = 0 if os.path.isfile(check_file): elapsed = time.time() - os.stat(check_file).st_mtime self.log.info("Running periodic command: %s", periodic_command) if not elapsed or elapsed > command_interval: if not self.not_really: rc = os.WEXITSTATUS(self.exec_system(periodic_command)) if rc != 0: self.log.error("Periodic command exited with status %d", rc) # dont update timestamp - try again next time else: open(check_file,"w").write("1") else: self.log.debug("%d seconds elapsed, not enough to run periodic.", elapsed) except Exception, det: self.log.error("Failed to run periodic command: %s", det) def master_backup(self): """ Copy master data directory to slave. 1. Obtain backup lock on slave. 2. Rotate backups on slave 3. Perform backup as usual 4. Purge unneeded WAL-s from slave 5. 
Release backup lock """ self.remote_xlock() errors = False try: self.pg_start_backup("FullBackup") self.remote_walmgr("xrotate") data_dir = self.cf.getfile("master_data") dst_loc = self.cf.getfile("full_backup") if dst_loc[-1] != "/": dst_loc += "/" master_spc_dir = os.path.join(data_dir, "pg_tblspc") slave_spc_dir = dst_loc + "tmpspc" # copy data self.chdir(data_dir) cmdline = [ "--delete", "--exclude", ".*", "--exclude", "*.pid", "--exclude", "*.opts", "--exclude", "*.conf", "--exclude", "pg_xlog", "--exclude", "pg_tblspc", "--exclude", "pg_log", "--exclude", "base/pgsql_tmp", "--copy-unsafe-links", ".", dst_loc] self.exec_big_rsync(cmdline) # copy tblspc first, to test if os.path.isdir(master_spc_dir): self.log.info("Checking tablespaces") list = os.listdir(master_spc_dir) if len(list) > 0: self.remote_mkdir(slave_spc_dir) for tblspc in list: if tblspc[0] == ".": continue tfn = os.path.join(master_spc_dir, tblspc) if not os.path.islink(tfn): self.log.info("Suspicious pg_tblspc entry: %s", tblspc) continue spc_path = os.path.realpath(tfn) self.log.info("Got tablespace %s: %s", tblspc, spc_path) dstfn = slave_spc_dir + "/" + tblspc try: os.chdir(spc_path) except Exception, det: self.log.warning("Broken link: %s", det) continue cmdline = [ "--delete", "--exclude", ".*", "--copy-unsafe-links", ".", dstfn] self.exec_big_rsync(cmdline) # copy the pg_log and pg_xlog directories, these may be # symlinked to nonstandard location, so pay attention self.rsync_log_directory(os.path.join(data_dir, "pg_log"), dst_loc) self.rsync_log_directory(os.path.join(data_dir, "pg_xlog"), dst_loc) # copy config files conf_dst_loc = self.cf.getfile("config_backup", "") if conf_dst_loc: master_conf_dir = os.path.dirname(self.cf.getfile("master_config")) self.log.info("Backup conf files from %s", master_conf_dir) self.chdir(master_conf_dir) cmdline = [ "--include", "*.conf", "--exclude", "*", ".", conf_dst_loc] self.exec_big_rsync(cmdline) self.remote_walmgr("xpurgewals") except Exception, 
e: self.log.error(e) errors = True finally: try: self.pg_stop_backup() except: pass try: self.remote_walmgr("xrelease") except: pass if not errors: self.log.info("Full backup successful") else: self.log.error("Full backup failed.") def slave_backup(self): """ Create backup on slave host. 1. Obtain backup lock 2. Pause WAL apply 3. Wait for WAL apply to complete (look at PROGRESS file) 4. Rotate old backups 5. Copy data directory to data.master 6. Create backup label and history file. 7. Purge unneeded WAL-s 8. Resume WAL apply 9. Release backup lock """ self.assert_is_master(False) if self.slave_lock_backups() != 0: self.log.error("Cannot obtain backup lock.") sys.exit(1) try: self.slave_pause(waitcomplete=1) try: self.slave_rotate_backups() src = self.cf.getfile("slave_data") dst = self.cf.getfile("full_backup") start_time = time.localtime() cmdline = ["cp", "-a", src, dst ] self.log.info("Executing %s", " ".join(cmdline)) if not self.not_really: self.exec_cmd(cmdline) stop_time = time.localtime() # Obtain the last restart point information ctl = PgControlData(self.cf.getfile("slave_bin", ""), dst, True) # TODO: The newly created backup directory probably still contains # backup_label.old and recovery.conf files. Remove these. 
if not ctl.is_valid: self.log.warning("Unable to determine last restart point, backup_label not created.") else: # Write backup label and history file backup_label = \ """START WAL LOCATION: %(xlogid)X/%(xrecoff)X (file %(wal_name)s) CHECKPOINT LOCATION: %(xlogid)X/%(xrecoff)X START TIME: %(start_time)s LABEL: SlaveBackup" """ backup_history = \ """START WAL LOCATION: %(xlogid)X/%(xrecoff)X (file %(wal_name)s) STOP WAL LOCATION: %(xlogid)X/%(xrecoff)X (file %(wal_name)s) CHECKPOINT LOCATION: %(xlogid)X/%(xrecoff)X START TIME: %(start_time)s LABEL: SlaveBackup" STOP TIME: %(stop_time)s """ label_params = { "xlogid": ctl.xlogid, "xrecoff": ctl.xrecoff, "wal_name": ctl.wal_name, "start_time": time.strftime("%Y-%m-%d %H:%M:%S %Z", start_time), "stop_time": time.strftime("%Y-%m-%d %H:%M:%S %Z", stop_time), } # Write the label filename = os.path.join(dst, "backup_label") if self.not_really: self.log.info("Writing backup label to %s", filename) else: lf = open(filename, "w") lf.write(backup_label % label_params) lf.close() # Now the history histfile = "%s.%08X.backup" % (ctl.wal_name, ctl.xrecoff % ctl.wal_size) completed_wals = self.cf.getfile("completed_wals") filename = os.path.join(completed_wals, histfile) if os.path.exists(filename): self.log.warning("%s: already exists, refusing to overwrite.", filename) else: if self.not_really: self.log.info("Writing backup history to %s", filename) else: lf = open(filename, "w") lf.write(backup_history % label_params) lf.close() self.slave_purge_wals() finally: self.slave_continue() finally: self.slave_resume_backups() def run_backup(self): if self.is_master: self.master_backup() else: self.slave_backup() def master_xarchive(self): """Copy a complete WAL segment to slave.""" self.assert_is_master(True) if len(self.args) < 2: die(1, "usage: xarchive srcpath srcname") srcpath = self.args[0] srcname = self.args[1] start_time = time.time() self.log.debug("%s: start copy", srcname) self.master_periodic() dst_loc = 
self.cf.getfile("completed_wals") if dst_loc[-1] != "/": dst_loc += "/" # copy data self.exec_rsync([ srcpath, dst_loc ], True) # sync the buffers to disk - this is should reduce the chance # of WAL file corruption in case the slave crashes. slave = self.cf.get("slave") cmdline = ["ssh", "-nT", slave, "sync" ] self.exec_cmd(cmdline) # slave has the file now, set markers self.set_last_complete(srcname) self.log.debug("%s: done", srcname) end_time = time.time() self.stat_add('count', 1) self.stat_add('duration', end_time - start_time) self.send_stats() def slave_append_partial(self): """ Read 'bytes' worth of data from stdin, append to the partial log file starting from 'offset'. On error it is assumed that master restarts from zero. The resulting file is always padded to XLOG_SEGMENT_SIZE bytes to simplify recovery. """ def fail(message): self.log.error("Slave: %s: %s", filename, message) sys.exit(1) self.assert_is_master(False) if len(self.args) < 3: die(1, "usage: xpartialsync ") filename = self.args[0] offset = int(self.args[1]) bytes = int(self.args[2]) data = sys.stdin.read(bytes) if len(data) != bytes: fail("not enough data, expected %d, got %d" % (bytes, len(data))) chunk = WalChunk(filename, offset, bytes) self.log.debug("Slave: adding to %s", chunk) name = os.path.join(self.cf.getfile("partial_wals"), filename) if self.not_really: self.log.info("Adding to partial: %s", name) return try: xlog = open(name, (offset == 0) and "w+" or "r+") except: fail("unable to open partial WAL: %s" % name) xlog.seek(offset) xlog.write(data) # padd the file to 16MB boundary, use sparse files padsize = XLOG_SEGMENT_SIZE - xlog.tell() if padsize > 0: xlog.seek(XLOG_SEGMENT_SIZE-1) xlog.write('\0') xlog.close() def master_send_partial(self, xlog_dir, chunk, daemon_mode): """ Send the partial log chunk to slave. Use SSH with input redirection for the copy, consider other options if the overhead becomes visible. 
""" try: xlog = open(os.path.join(xlog_dir, chunk.filename)) except IOError, det: self.log.warning("Cannot access file %s", chunk.filename) return xlog.seek(chunk.pos) # Fork the sync process childpid = os.fork() syncstart = time.time() if childpid == 0: os.dup2(xlog.fileno(), sys.stdin.fileno()) try: self.remote_walmgr("xpartialsync %s %d %d" % (chunk.filename, chunk.pos, chunk.bytes), False) except: os._exit(1) os._exit(0) chunk.sync_time += (time.time() - syncstart) status = os.waitpid(childpid, 0) rc = os.WEXITSTATUS(status[1]) if rc == 0: log = daemon_mode and self.log.debug or self.log.info log("sent to slave: %s" % chunk) chunk.pos += chunk.bytes chunk.sync_count += 1 else: # Start from zero after an error chunk.pos = 0 self.log.error("xpartialsync exited with status %d, restarting from zero.", rc) time.sleep(5) def master_syncdaemon(self): self.assert_is_master(True) self.set_single_loop(0) self.master_sync(True) def master_sync(self, daemon_mode=False): """ Copy partial WAL segments to slave. On 8.2 set use_xlog_functions=1 in config file - this enables record based walshipping. On 8.0 the only option is to sync files. If daemon_mode is specified it never switches from record based shipping to file based shipping. """ self.assert_is_master(True) use_xlog_functions = self.cf.getint("use_xlog_functions", False) data_dir = self.cf.getfile("master_data") xlog_dir = os.path.join(data_dir, "pg_xlog") master_bin = self.cf.getfile("master_bin", "") dst_loc = os.path.join(self.cf.getfile("partial_wals"), "") db = None if use_xlog_functions: try: db = self.get_database("master_db", autocommit=1) except: self.log.warning("Database unavailable, record based log shipping not possible.") if daemon_mode: return if db: cur = db.cursor() cur.execute("select file_name, file_offset from pg_xlogfile_name_offset(pg_current_xlog_location())") (file_name, file_offs) = cur.fetchone() if not self.walchunk or self.walchunk.filename != file_name: # Switched to new WAL segment. 
Don't bother to copy the last bits - it # will be obsoleted by the archive_command. if self.walchunk and self.walchunk.sync_count > 0: self.log.info("Switched in %d seconds, %f sec in %d interim syncs, avg %f", time.time() - self.walchunk.start_time, self.walchunk.sync_time, self.walchunk.sync_count, self.walchunk.sync_time / self.walchunk.sync_count) self.walchunk = WalChunk(file_name, 0, file_offs) else: self.walchunk.bytes = file_offs - self.walchunk.pos if self.walchunk.bytes > 0: self.master_send_partial(xlog_dir, self.walchunk, daemon_mode) else: files = os.listdir(xlog_dir) files.sort() last = self.get_last_complete() if last: self.log.info("%s: last complete", last) else: self.log.info("last complete not found, copying all") # obtain the last checkpoint wal name, this can be used for # limiting the amount of WAL files to copy if the database # has been cleanly shut down ctl = PgControlData(master_bin, data_dir, False) checkpoint_wal = None if ctl.is_valid: if not ctl.is_shutdown: # cannot rely on the checkpoint wal, should use some other method self.log.info("Database state is not 'shut down', copying all") else: # ok, the database is shut down, we can use last checkpoint wal checkpoint_wal = ctl.wal_name self.log.info("last checkpoint wal: %s", checkpoint_wal) else: self.log.info("Unable to obtain control file information, copying all") for fn in files: # check if interesting file if len(fn) < 10: continue if fn[0] < "0" or fn[0] > '9': continue if fn.find(".") > 0: continue # check if too old if last: dot = last.find(".") if dot > 0: xlast = last[:dot] if fn < xlast: continue else: if fn <= last: continue # check if too new if checkpoint_wal and fn > checkpoint_wal: continue # got interesting WAL xlog = os.path.join(xlog_dir, fn) # copy data self.log.info('Syncing %s', xlog) if self.exec_rsync([xlog, dst_loc], not daemon_mode) != 0: self.log.error('Cannot sync %s', xlog) break else: self.log.info("Partial copy done") def xrestore(self): if len(self.args) 
< 2: die(1, "usage: xrestore srcname dstpath [last restartpoint wal]") srcname = self.args[0] dstpath = self.args[1] lstname = None if len(self.args) > 2: lstname = self.args[2] if self.is_master: self.master_xrestore(srcname, dstpath) else: self.slave_xrestore_unsafe(srcname, dstpath, os.getppid(), lstname) def slave_xrestore(self, srcname, dstpath): loop = 1 ppid = os.getppid() while loop: try: self.slave_xrestore_unsafe(srcname, dstpath, ppid) loop = 0 except SystemExit, d: sys.exit(1) except Exception, d: exc, msg, tb = sys.exc_info() self.log.fatal("xrestore %s crashed: %s: '%s' (%s: %r)", srcname, exc, str(msg).rstrip(), tb, traceback.format_tb(tb)) del tb time.sleep(10) self.log.info("Re-exec: %r", sys.argv) os.execv(sys.argv[0], sys.argv) def master_xrestore(self, srcname, dstpath): """ Restore the xlog file from slave. """ paths = [ self.cf.getfile("completed_wals"), self.cf.getfile("partial_wals") ] self.log.info("Restore %s to %s", srcname, dstpath) for src in paths: self.log.debug("Looking in %s", src) srcfile = os.path.join(src, srcname) if self.exec_rsync([srcfile, dstpath]) == 0: return self.log.warning("Could not restore file %s", srcname) def is_parent_alive(self, parent_pid): if os.getppid() != parent_pid or parent_pid <= 1: return False return True def slave_xrestore_unsafe(self, srcname, dstpath, parent_pid, lstname = None): srcdir = self.cf.getfile("completed_wals") partdir = self.cf.getfile("partial_wals") pausefile = os.path.join(srcdir, "PAUSE") stopfile = os.path.join(srcdir, "STOP") prgrfile = os.path.join(srcdir, "PROGRESS") prxlogfile = os.path.join(srcdir,"PG_RECEIVEXLOG") srcfile = os.path.join(srcdir, srcname) partfile = os.path.join(partdir, srcname) # if we are using streaming replication, exit immediately # if the srcfile is not here yet primary_conninfo = self.cf.get("primary_conninfo", "") if primary_conninfo and not os.path.isfile(srcfile): self.log.info("%s: not found (ignored)", srcname) # remove PG_RECEIVEXLOG file if it's 
present if os.path.isfile(prxlogfile) and not srcname.endswith('.history'): os.remove(prxlogfile) sys.exit(1) # assume that postgres has processed the WAL file and is # asking for next - hence work not in progress anymore if os.path.isfile(prgrfile): os.remove(prgrfile) # loop until srcfile or stopfile appears while 1: if os.path.isfile(pausefile): self.log.info("pause requested, sleeping") time.sleep(20) continue if os.path.isfile(srcfile): self.log.info("%s: Found", srcname) break # ignore .history files unused, ext = os.path.splitext(srcname) if ext == ".history": self.log.info("%s: not found, ignoring", srcname) sys.exit(1) # if stopping, include also partial wals if os.path.isfile(stopfile): if os.path.isfile(partfile): self.log.info("%s: found partial", srcname) srcfile = partfile break else: self.log.info("%s: not found, stopping", srcname) sys.exit(1) # nothing to do, just in case check if parent is alive if not self.is_parent_alive(parent_pid): self.log.warning("Parent dead, quitting") sys.exit(1) # nothing to do, sleep self.log.debug("%s: not found, sleeping", srcname) time.sleep(1) # got one, copy it cmdline = ["cp", srcfile, dstpath] self.exec_cmd(cmdline) if self.cf.getint("keep_backups", 0) == 0: # cleanup only if we don't keep backup history, keep the files needed # to roll forward from last restart point. If the restart point is not # handed to us (i.e 8.3 or later), then calculate it ourselves. 
# Note that historic WAL files are removed during backup rotation if lstname == None: lstname = self.last_restart_point(srcname) self.log.debug("calculated restart point: %s", lstname) else: self.log.debug("using supplied restart point: %s", lstname) self.log.debug("%s: copy done, cleanup", srcname) self.slave_cleanup(lstname) # create a PROGRESS file to notify that postgres is processing the WAL open(prgrfile, "w").write("1") # it would be nice to have apply time too self.stat_add('count', 1) self.send_stats() def restore_database(self, restore_config=True): """Restore the database from backup If setname is specified, the contents of that backup set directory are restored instead of "full_backup". Also copy is used instead of rename to restore the directory (unless a pg_xlog directory has been specified). Restore to altdst if specified. Complain if it exists. """ setname = len(self.args) > 0 and self.args[0] or None altdst = len(self.args) > 1 and self.args[1] or None if not self.is_master: data_dir = self.cf.getfile("slave_data") stop_cmd = self.cf.getfile("slave_stop_cmd", "") start_cmd = self.cf.getfile("slave_start_cmd") pidfile = os.path.join(data_dir, "postmaster.pid") else: if not setname or not altdst: die(1, "Source and target directories must be specified if running on master node.") data_dir = altdst stop_cmd = None pidfile = None if setname: full_dir = os.path.join(self.cf.getfile("walmgr_data"), setname) else: full_dir = self.cf.getfile("full_backup") # stop postmaster if ordered if stop_cmd and os.path.isfile(pidfile): self.log.info("Stopping postmaster: %s", stop_cmd) self.exec_system(stop_cmd) time.sleep(3) # is it dead? if pidfile and os.path.isfile(pidfile): self.log.info("Pidfile exists, checking if process is running.") if self.signal_postmaster(data_dir, 0): self.log.fatal("Postmaster still running. 
Cannot continue.") sys.exit(1) # find name for data backup i = 0 while 1: bak = "%s.%d" % (data_dir.rstrip("/"), i) if not os.path.isdir(bak): break i += 1 if self.is_master: print >>sys.stderr, "About to restore to directory %s. The postgres cluster should be shut down." % data_dir if not yesno("Is postgres shut down on %s ?" % data_dir): die(1, "Shut it down and try again.") if not self.is_master: createbackup = True elif os.path.isdir(data_dir): createbackup = yesno("Create backup of %s?" % data_dir) else: # nothing to back up createbackup = False # see if we have to make a backup of the data directory backup_datadir = self.cf.getboolean('backup_datadir', True) if os.path.isdir(data_dir) and not backup_datadir: self.log.warning('backup_datadir is disabled, deleting old data dir') shutil.rmtree(data_dir) if not setname and os.path.isdir(data_dir) and backup_datadir: # compatibility mode - restore without a set name and data directory exists self.log.info("Data directory already exists, moving it out of the way.") createbackup = True # move old data away if createbackup and os.path.isdir(data_dir): self.log.info("Move %s to %s", data_dir, bak) if not self.not_really: os.rename(data_dir, bak) # move new data, copy if setname specified self.log.info("%s %s to %s", setname and "Copy" or "Move", full_dir, data_dir) if self.cf.getfile('slave_pg_xlog', ''): link_xlog_dir = True exclude_pg_xlog = '--exclude=pg_xlog' else: link_xlog_dir = False exclude_pg_xlog = '' if not self.not_really: if not setname and not link_xlog_dir: os.rename(full_dir, data_dir) else: rsync_args=["--delete", "--no-relative", "--exclude=pg_xlog/*"] if exclude_pg_xlog: rsync_args.append(exclude_pg_xlog) rsync_args += [os.path.join(full_dir, ""), data_dir] self.exec_rsync(rsync_args, True) if link_xlog_dir: os.symlink(self.cf.getfile('slave_pg_xlog'), "%s/pg_xlog" % data_dir) if (self.is_master and createbackup and os.path.isdir(bak)): # restore original xlog files to data_dir/pg_xlog # symlinked 
directories are dereferenced self.exec_cmd(["cp", "-rL", "%s/pg_xlog/" % full_dir, "%s/pg_xlog" % data_dir ]) else: # create an archive_status directory xlog_dir = os.path.join(data_dir, "pg_xlog") archive_path = os.path.join(xlog_dir, "archive_status") if not os.path.exists(archive_path): os.mkdir(archive_path, 0700) else: data_dir = full_dir # copy configuration files to rotated backup directory if createbackup and os.path.isdir(bak): for cf in ('postgresql.conf', 'pg_hba.conf', 'pg_ident.conf'): cfsrc = os.path.join(bak, cf) cfdst = os.path.join(data_dir, cf) if os.path.exists(cfdst): self.log.info("Already exists: %s", cfdst) elif os.path.exists(cfsrc): self.log.debug("Copy %s to %s", cfsrc, cfdst) if not self.not_really: copy_conf(cfsrc, cfdst) # re-link tablespaces spc_dir = os.path.join(data_dir, "pg_tblspc") tmp_dir = os.path.join(data_dir, "tmpspc") if not os.path.isdir(spc_dir): # 8.3 requires its existence os.mkdir(spc_dir) if os.path.isdir(tmp_dir): self.log.info("Linking tablespaces to temporary location") # don't look into spc_dir, thus allowing # user to move them before. 
re-link only those # that are still in tmp_dir list = os.listdir(tmp_dir) list.sort() for d in list: if d[0] == ".": continue link_loc = os.path.abspath(os.path.join(spc_dir, d)) link_dst = os.path.abspath(os.path.join(tmp_dir, d)) self.log.info("Linking tablespace %s to %s", d, link_dst) if not self.not_really: if os.path.islink(link_loc): os.remove(link_loc) os.symlink(link_dst, link_loc) # write recovery.conf rconf = os.path.join(data_dir, "recovery.conf") cf_file = os.path.abspath(self.cf.filename) # determine if we can use %r in restore_command ctl = PgControlData(self.cf.getfile("slave_bin", ""), data_dir, True) if ctl.pg_version > 830: self.log.debug('pg_version is %s, adding %%r to restore command', ctl.pg_version) restore_command = 'xrestore %f "%p" %r' else: if not ctl.is_valid: self.log.warning('unable to run pg_controldata, assuming pre 8.3 environment') else: self.log.debug('using pg_controldata to determine restart points') restore_command = 'xrestore %f "%p"' conf = "restore_command = '%s %s %s'\n" % (self.script, cf_file, restore_command) # do we have streaming replication (hot standby) primary_conninfo = self.cf.get("primary_conninfo", "") if primary_conninfo: conf += "standby_mode = 'on'\n" conf += "trigger_file = '%s'\n" % os.path.join(self.cf.getfile("completed_wals"), "STOP") conf += "primary_conninfo = '%s'\n" % primary_conninfo conf += "archive_cleanup_command = '%s %s %%r'\n" % \ (os.path.join(self.cf.getfile("slave_bin"), "pg_archivecleanup"), self.cf.getfile("completed_wals")) self.log.info("Write %s", rconf) if self.not_really: print conf else: f = open(rconf, "w") f.write(conf) f.close() # remove stopfile on slave if not self.is_master: stopfile = os.path.join(self.cf.getfile("completed_wals"), "STOP") if os.path.isfile(stopfile): self.log.info("Removing stopfile: %s", stopfile) if not self.not_really: os.remove(stopfile) # attempt to restore configuration. 
Note that we cannot # postpone this to boot time, as the configuration is needed # to start postmaster. if restore_config: self.slave_restore_config() # run database in recovery mode self.log.info("Starting postmaster: %s", start_cmd) self.exec_system(start_cmd) else: self.log.info("Data files restored, recovery.conf created.") self.log.info("postgresql.conf and additional WAL files may need to be restored manually.") def slave_restore_config(self): """Restore the configuration files if target directory specified.""" self.assert_is_master(False) cf_source_dir = self.cf.getfile("config_backup", "") cf_target_dir = self.cf.getfile("slave_config_dir", "") if not cf_source_dir: self.log.info("Configuration backup location not specified.") return if not cf_target_dir: self.log.info("Configuration directory not specified, config files not restored.") return if not os.path.exists(cf_target_dir): self.log.warning("Configuration directory does not exist: %s", cf_target_dir) return self.log.info("Restoring configuration files") for cf in ('postgresql.conf', 'pg_hba.conf', 'pg_ident.conf'): cfsrc = os.path.join(cf_source_dir, cf) cfdst = os.path.join(cf_target_dir, cf) if not os.path.isfile(cfsrc): self.log.warning("Missing configuration file backup: %s", cf) continue self.log.debug("Copy %s to %s", cfsrc, cfdst) if not self.not_really: copy_conf(cfsrc, cfdst) if cf == 'postgresql.conf': self.slave_deconfigure_archiving(cfdst) def slave_boot(self): self.assert_is_master(False) srcdir = self.cf.getfile("completed_wals") datadir = self.cf.getfile("slave_data") stopfile = os.path.join(srcdir, "STOP") if self.not_really: self.log.info("Writing STOP file: %s", stopfile) else: open(stopfile, "w").write("1") self.log.info("Stopping recovery mode") def slave_createslave(self): self.assert_is_master(False) errors = False xlog_dir = self.cf.getfile("completed_wals") full_dir = self.cf.getfile("full_backup") prxloglock = os.path.join(xlog_dir,"PG_RECEIVEXLOG") pg_receivexlog = 
os.path.join(self.cf.getfile("slave_bin"), "pg_receivexlog") pg_basebackup = os.path.join(self.cf.getfile("slave_bin"), "pg_basebackup") # check if pg_receivexlog is available if not os.access(pg_receivexlog, os.X_OK): die(1, "pg_receivexlog not available") # check if pg_receivexlog is already running if os.path.isfile(prxloglock): pidstring = open(prxloglock,"r").read() try: pid =int(pidstring) try: os.kill(pid, 0) except OSError, e: if e.errno == errno.EPERM: self.log.fatal("Found pg_receivexlog lock file %s, pid %d in use", prxloglock, pid) sys.exit(1) elif e.errno == errno.ESRCH: self.log.info("Ignoring stale pg_receivexlog lock file") if not self.not_really: os.remove(prxloglock) else: self.log.fatal("pg_receivexlog is already running in %s, pid %d", xlog_dir, pid) sys.exit(1) except ValueError: self.log.fatal("pg_receivexlog lock file %s does not contain a pid: %s", prxloglock, pidstring) sys.exit(1) # create directories self.walmgr_setup() # ensure that backup destination is 0700 if not self.not_really: os.chmod(full_dir,0700) self.args = [str(os.getpid())] if self.slave_lock_backups() != 0: self.log.fatal("Cannot obtain backup lock.") sys.exit(1) # get host and user from primary_conninfo primary_conninfo = self.cf.get("primary_conninfo", "") if not primary_conninfo: die(1, "primary_conninfo missing") host, port, user, sslmode = self.parse_conninfo(primary_conninfo) # change sslmode for pg_receivexlog and pg_basebackup envssl=None if sslmode: envssl={"PGSSLMODE": sslmode} try: # determine postgres version, we cannot use pg_control version number since # 9.0 and 9.1 are using the same number in controlfile pg_ver = "" try: cmdline = [os.path.join(self.cf.getfile("slave_bin"), "postgres"),'-V'] process = subprocess.Popen(cmdline, stdout=subprocess.PIPE) output = process.communicate() pg_ver = output[0].split()[2] self.log.debug("PostgreSQL version: %s" % pg_ver) except: pass # create pg_receivexlog process cmdline = [pg_receivexlog,'-D', xlog_dir, '-h', host, 
'-U', user, '-p', port, '-w'] self.log.info("Starting pg_receivexlog") if not self.not_really: p_rxlog = subprocess.Popen(cmdline,env=envssl) # create pg_receivexlog lock file open(prxloglock, "w").write(str(p_rxlog.pid)) # leave error checking for pg_basebackup # if pg_basebackup command fails then pg_receivexlog is not working either # start backup self.log.info("Starting pg_basebackup") cmdline = [pg_basebackup, '-D', full_dir, '-h', host, '-U', user, '-p', port, '-w'] if not self.not_really: p_basebackup = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=envssl) output = p_basebackup.communicate() res = p_basebackup.returncode if res != 0: raise Exception("exec failed, res=%d (%r), %s" % (res, cmdline, output[1])) # fix skipped ssl symlinks (only relevant for 9.1) if pg_ver.startswith('9.1.'): for line in output[1].splitlines(): m = re.match('WARNING: skipping special file "\./server\.(crt|key)"', line) if m: # create symlinks if m.group(1) == 'crt': os.symlink('/etc/ssl/certs/ssl-cert-snakeoil.pem', os.path.join(full_dir,'server.crt')) elif m.group(1) == 'key': os.symlink('/etc/ssl/private/ssl-cert-snakeoil.key', os.path.join(full_dir,'server.key')) self.log.info("pg_basebackup finished successfully") # restore self.args = [] self.restore_database(False) # wait for recovery while os.path.isfile(prxloglock) and not self.not_really: time.sleep(5) except Exception, e: self.log.error(e) errors = True finally: # stop pg_receivexlog try: if not self.not_really: os.kill(p_rxlog.pid, signal.SIGTERM) self.log.info("pg_receivelog stopped") except Exception, det: self.log.warning("Failed to stop pg_receivexlog: %s", det) # cleanup if os.path.isfile(prxloglock): os.remove(prxloglock) if not self.not_really: for f in os.listdir(xlog_dir): if f.endswith('.partial'): self.log.debug("Removing %s", os.path.join(xlog_dir,f)) os.remove(os.path.join(xlog_dir,f)) if not self.not_really and os.path.isdir(full_dir): shutil.rmtree(full_dir) 
self.slave_resume_backups() if not errors: self.log.info("Streaming replication standby created successfully") else: self.log.error("Failed to create streaming replication standby") sys.exit(1) def slave_pause(self, waitcomplete=0): """Pause the WAL apply, wait until last file applied if needed""" self.assert_is_master(False) srcdir = self.cf.getfile("completed_wals") pausefile = os.path.join(srcdir, "PAUSE") if not self.not_really: open(pausefile, "w").write("1") else: self.log.info("Writing PAUSE file: %s", pausefile) self.log.info("Pausing recovery mode") # wait for log apply to complete if waitcomplete: prgrfile = os.path.join(srcdir, "PROGRESS") stopfile = os.path.join(srcdir, "STOP") if os.path.isfile(stopfile): self.log.warning("Recovery is stopped, backup is invalid if the database is open.") return while os.path.isfile(prgrfile): self.log.info("Waiting for WAL processing to complete ...") if self.not_really: return time.sleep(1) def slave_continue(self): self.assert_is_master(False) srcdir = self.cf.getfile("completed_wals") pausefile = os.path.join(srcdir, "PAUSE") if os.path.isfile(pausefile): if not self.not_really: os.remove(pausefile) self.log.info("Continuing with recovery") else: self.log.info("Recovery not paused?") def slave_lock_backups_exit(self): """Exit with lock acquired status""" self.assert_is_master(False) sys.exit(self.slave_lock_backups()) def slave_lock_backups(self): """Create lock file to deny other concurrent backups""" srcdir = self.cf.getfile("completed_wals") lockfile = os.path.join(srcdir, "BACKUPLOCK") if os.path.isfile(lockfile): self.log.warning("Somebody already has the backup lock.") lockfilehandle = open(lockfile,"r") pidstring = lockfilehandle.read(); try: pid = int(pidstring) print("%d" % pid) except ValueError: self.log.error("lock file does not contain a pid: %s", pidstring) return 1 if not self.not_really: f = open(lockfile, "w") if len(self.args) > 0: f.write(self.args[0]) f.close() self.log.info("Backup lock 
obtained.") return 0 def slave_resume_backups(self): """Remove backup lock file, allow other backups to run""" self.assert_is_master(False) srcdir = self.cf.getfile("completed_wals") lockfile = os.path.join(srcdir, "BACKUPLOCK") if os.path.isfile(lockfile): if not self.not_really: os.remove(lockfile) self.log.info("Backup lock released.") else: self.log.info("Backup lock not held.") def list_backups(self): """List available backups. On master this just calls slave listbackups via SSH""" if self.is_master: self.remote_walmgr("listbackups") else: backups = self.get_backup_list(self.cf.getfile("full_backup")) if backups: print "\nList of backups:\n" print "%-15s %-24s %-11s %-24s" % \ ("Backup set", "Timestamp", "Label", "First WAL") print "%s %s %s %s" % (15*'-', 24*'-', 11*'-',24*'-') for backup in backups: lbl = BackupLabel(backup) print "%-15s %-24.24s %-11.11s %-24s" % \ (os.path.basename(backup), lbl.start_time, lbl.label_string, lbl.first_wal) print else: print "\nNo backups found.\n" def get_first_walname(self,backupdir): """Returns the name of the first needed WAL segment for backupset""" label = BackupLabel(backupdir) if not label.first_wal: self.log.error("WAL name not found at %s", backupdir) return None return label.first_wal def last_restart_point(self,walname): """ Determine the WAL file of the last restart point (recovery checkpoint). For 8.3 this could be done with %r parameter to restore_command, for 8.2 we need to consult control file (parse pg_controldata output). 
""" slave_data = self.cf.getfile("slave_data") backup_label = os.path.join(slave_data, "backup_label") if os.path.exists(backup_label): # Label file still exists, use it for determining the restart point lbl = BackupLabel(slave_data) self.log.debug("Last restart point from backup_label: %s", lbl.first_wal) return lbl.first_wal ctl = PgControlData(self.cf.getfile("slave_bin", ""), ".", True) if not ctl.is_valid: # No restart point information, use the given wal name self.log.warning("Unable to determine last restart point") return walname self.log.debug("Last restart point: %s", ctl.wal_name) return ctl.wal_name def order_backupdirs(self,prefix,a,b): """Compare the backup directory indexes numerically""" prefix = os.path.abspath(prefix) a_indx = a[len(prefix)+1:] if not a_indx: a_indx = -1 b_indx = b[len(prefix)+1:] if not b_indx: b_indx = -1 return cmp(int(a_indx), int(b_indx)) def get_backup_list(self,dst_loc): """Return the list of backup directories""" dirlist = glob.glob(os.path.abspath(dst_loc) + "*") dirlist.sort(lambda x,y: self.order_backupdirs(dst_loc, x,y)) backupdirs = [ dir for dir in dirlist if os.path.isdir(dir) and os.path.isfile(os.path.join(dir, "backup_label")) or os.path.isfile(os.path.join(dir, "backup_label.old"))] return backupdirs def slave_purge_wals(self): """ Remove WAL files not needed for recovery """ self.assert_is_master(False) backups = self.get_backup_list(self.cf.getfile("full_backup")) if backups: lastwal = self.get_first_walname(backups[-1]) if lastwal: self.log.info("First useful WAL file is: %s", lastwal) self.slave_cleanup(lastwal) else: self.log.debug("No WAL-s to clean up.") def slave_rotate_backups(self): """ Rotate backups by increasing backup directory suffixes. Note that since we also have to make room for next backup, we actually have keep_backups - 1 backups available after this. Unneeded WAL files are not removed here, handled by xpurgewals command instead. 
""" self.assert_is_master(False) dst_loc = self.cf.getfile("full_backup") maxbackups = self.cf.getint("keep_backups", 0) archive_command = self.cf.get("archive_command", "") backupdirs = self.get_backup_list(dst_loc) if not backupdirs or maxbackups < 1: self.log.debug("Nothing to rotate") # remove expired backups while len(backupdirs) >= maxbackups and len(backupdirs) > 0: last = backupdirs.pop() # if archive_command is set, run it before removing the directory # Resume only if archive command succeeds. if archive_command: cmdline = archive_command.replace("$BACKUPDIR", last) self.log.info("Executing archive_command: %s", cmdline) rc = self.exec_system(cmdline) if rc != 0: self.log.error("Backup archiving returned %d, exiting!", rc) sys.exit(1) self.log.info("Removing expired backup directory: %s", last) if self.not_really: continue cmdline = [ "rm", "-r", last ] self.exec_cmd(cmdline) # bump the suffixes if base directory exists if os.path.isdir(dst_loc): backupdirs.sort(lambda x,y: self.order_backupdirs(dst_loc, y,x)) for dir in backupdirs: (name, index) = os.path.splitext(dir) if not re.match('\.[0-9]+$', index): name = name + index index = 0 else: index = int(index[1:])+1 self.log.debug("Rename %s to %s.%s", dir, name, index) if self.not_really: continue os.rename(dir, "%s.%s" % (name,index)) def slave_cleanup(self, last_applied): completed_wals = self.cf.getfile("completed_wals") partial_wals = self.cf.getfile("partial_wals") self.log.debug("cleaning completed wals before %s", last_applied) self.del_wals(completed_wals, last_applied) if os.path.isdir(partial_wals): self.log.debug("cleaning partial wals before %s", last_applied) self.del_wals(partial_wals, last_applied) else: self.log.warning("partial_wals dir does not exist: %s", partial_wals) self.log.debug("cleaning done") def del_wals(self, path, last): dot = last.find(".") if dot > 0: last = last[:dot] list = os.listdir(path) list.sort() cur_last = None n = len(list) for i in range(n): fname = list[i] full 
= os.path.join(path, fname) if fname[0] < "0" or fname[0] > "9": continue if not fname.startswith(last[0:8]): # only look at WAL segments in a same timeline continue ok_del = 0 if fname < last: self.log.debug("deleting %s", full) if not self.not_really: try: os.remove(full) except: # don't report the errors if the file has been already removed # happens due to conflicts with pg_archivecleanup for instance. pass cur_last = fname return cur_last if __name__ == "__main__": script = WalMgr(sys.argv[1:]) script.start() skytools-3.2.6/python/modules/0000755000000000000000000000000012426435645013303 5ustar skytools-3.2.6/python/modules/hashtext.c0000644000000000000000000002236112426435645015303 0ustar /* * Postgres hashes for Python. */ #define PY_SSIZE_T_CLEAN #include #if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN) typedef int Py_ssize_t; #define PY_SSIZE_T_MAX INT_MAX #define PY_SSIZE_T_MIN INT_MIN #endif #include #include typedef uint32_t (*hash_fn_t)(const void *src, unsigned src_len); typedef uint8_t uint8; typedef uint16_t uint16; typedef uint32_t uint32; #define rot(x, k) (((x)<<(k)) | ((x)>>(32-(k)))) /* * Old Postgres hashtext() */ #define mix_old(a,b,c) \ { \ a -= b; a -= c; a ^= ((c)>>13); \ b -= c; b -= a; b ^= ((a)<<8); \ c -= a; c -= b; c ^= ((b)>>13); \ a -= b; a -= c; a ^= ((c)>>12); \ b -= c; b -= a; b ^= ((a)<<16); \ c -= a; c -= b; c ^= ((b)>>5); \ a -= b; a -= c; a ^= ((c)>>3); \ b -= c; b -= a; b ^= ((a)<<10); \ c -= a; c -= b; c ^= ((b)>>15); \ } static uint32_t hash_old_hashtext(const void *_k, unsigned keylen) { const unsigned char *k = _k; register uint32 a, b, c, len; /* Set up the internal state */ len = keylen; a = b = 0x9e3779b9; /* the golden ratio; an arbitrary value */ c = 3923095; /* initialize with an arbitrary value */ /* handle most of the key */ while (len >= 12) { a += (k[0] + ((uint32) k[1] << 8) + ((uint32) k[2] << 16) + ((uint32) k[3] << 24)); b += (k[4] + ((uint32) k[5] << 8) + ((uint32) k[6] << 16) + ((uint32) k[7] 
<< 24)); c += (k[8] + ((uint32) k[9] << 8) + ((uint32) k[10] << 16) + ((uint32) k[11] << 24)); mix_old(a, b, c); k += 12; len -= 12; } /* handle the last 11 bytes */ c += keylen; switch (len) /* all the case statements fall through */ { case 11: c += ((uint32) k[10] << 24); case 10: c += ((uint32) k[9] << 16); case 9: c += ((uint32) k[8] << 8); /* the first byte of c is reserved for the length */ case 8: b += ((uint32) k[7] << 24); case 7: b += ((uint32) k[6] << 16); case 6: b += ((uint32) k[5] << 8); case 5: b += k[4]; case 4: a += ((uint32) k[3] << 24); case 3: a += ((uint32) k[2] << 16); case 2: a += ((uint32) k[1] << 8); case 1: a += k[0]; /* case 0: nothing left to add */ } mix_old(a, b, c); /* report the result */ return c; } /* * New Postgres hashtext() */ #define UINT32_ALIGN_MASK 3 #define mix_new(a,b,c) \ { \ a -= c; a ^= rot(c, 4); c += b; \ b -= a; b ^= rot(a, 6); a += c; \ c -= b; c ^= rot(b, 8); b += a; \ a -= c; a ^= rot(c,16); c += b; \ b -= a; b ^= rot(a,19); a += c; \ c -= b; c ^= rot(b, 4); b += a; \ } #define final_new(a,b,c) \ { \ c ^= b; c -= rot(b,14); \ a ^= c; a -= rot(c,11); \ b ^= a; b -= rot(a,25); \ c ^= b; c -= rot(b,16); \ a ^= c; a -= rot(c, 4); \ b ^= a; b -= rot(a,14); \ c ^= b; c -= rot(b,24); \ } static uint32_t hash_new_hashtext(const void *_k, unsigned keylen) { const unsigned char *k = _k; uint32_t a, b, c, len; /* Set up the internal state */ len = keylen; a = b = c = 0x9e3779b9 + len + 3923095; /* If the source pointer is word-aligned, we use word-wide fetches */ if (((long) k & UINT32_ALIGN_MASK) == 0) { /* Code path for aligned source data */ register const uint32_t *ka = (const uint32_t *) k; /* handle most of the key */ while (len >= 12) { a += ka[0]; b += ka[1]; c += ka[2]; mix_new(a, b, c); ka += 3; len -= 12; } /* handle the last 11 bytes */ k = (const unsigned char *) ka; #ifdef WORDS_BIGENDIAN switch (len) { case 11: c += ((uint32) k[10] << 8); /* fall through */ case 10: c += ((uint32) k[9] << 16); /* fall through 
*/ case 9: c += ((uint32) k[8] << 24); /* the lowest byte of c is reserved for the length */ /* fall through */ case 8: b += ka[1]; a += ka[0]; break; case 7: b += ((uint32) k[6] << 8); /* fall through */ case 6: b += ((uint32) k[5] << 16); /* fall through */ case 5: b += ((uint32) k[4] << 24); /* fall through */ case 4: a += ka[0]; break; case 3: a += ((uint32) k[2] << 8); /* fall through */ case 2: a += ((uint32) k[1] << 16); /* fall through */ case 1: a += ((uint32) k[0] << 24); /* case 0: nothing left to add */ } #else /* !WORDS_BIGENDIAN */ switch (len) { case 11: c += ((uint32) k[10] << 24); /* fall through */ case 10: c += ((uint32) k[9] << 16); /* fall through */ case 9: c += ((uint32) k[8] << 8); /* the lowest byte of c is reserved for the length */ /* fall through */ case 8: b += ka[1]; a += ka[0]; break; case 7: b += ((uint32) k[6] << 16); /* fall through */ case 6: b += ((uint32) k[5] << 8); /* fall through */ case 5: b += k[4]; /* fall through */ case 4: a += ka[0]; break; case 3: a += ((uint32) k[2] << 16); /* fall through */ case 2: a += ((uint32) k[1] << 8); /* fall through */ case 1: a += k[0]; /* case 0: nothing left to add */ } #endif /* WORDS_BIGENDIAN */ } else { /* Code path for non-aligned source data */ /* handle most of the key */ while (len >= 12) { #ifdef WORDS_BIGENDIAN a += (k[3] + ((uint32) k[2] << 8) + ((uint32) k[1] << 16) + ((uint32) k[0] << 24)); b += (k[7] + ((uint32) k[6] << 8) + ((uint32) k[5] << 16) + ((uint32) k[4] << 24)); c += (k[11] + ((uint32) k[10] << 8) + ((uint32) k[9] << 16) + ((uint32) k[8] << 24)); #else /* !WORDS_BIGENDIAN */ a += (k[0] + ((uint32) k[1] << 8) + ((uint32) k[2] << 16) + ((uint32) k[3] << 24)); b += (k[4] + ((uint32) k[5] << 8) + ((uint32) k[6] << 16) + ((uint32) k[7] << 24)); c += (k[8] + ((uint32) k[9] << 8) + ((uint32) k[10] << 16) + ((uint32) k[11] << 24)); #endif /* WORDS_BIGENDIAN */ mix_new(a, b, c); k += 12; len -= 12; } /* handle the last 11 bytes */ #ifdef WORDS_BIGENDIAN switch (len) /* all 
the case statements fall through */ { case 11: c += ((uint32) k[10] << 8); case 10: c += ((uint32) k[9] << 16); case 9: c += ((uint32) k[8] << 24); /* the lowest byte of c is reserved for the length */ case 8: b += k[7]; case 7: b += ((uint32) k[6] << 8); case 6: b += ((uint32) k[5] << 16); case 5: b += ((uint32) k[4] << 24); case 4: a += k[3]; case 3: a += ((uint32) k[2] << 8); case 2: a += ((uint32) k[1] << 16); case 1: a += ((uint32) k[0] << 24); /* case 0: nothing left to add */ } #else /* !WORDS_BIGENDIAN */ switch (len) /* all the case statements fall through */ { case 11: c += ((uint32) k[10] << 24); case 10: c += ((uint32) k[9] << 16); case 9: c += ((uint32) k[8] << 8); /* the lowest byte of c is reserved for the length */ case 8: b += ((uint32) k[7] << 24); case 7: b += ((uint32) k[6] << 16); case 6: b += ((uint32) k[5] << 8); case 5: b += k[4]; case 4: a += ((uint32) k[3] << 24); case 3: a += ((uint32) k[2] << 16); case 2: a += ((uint32) k[1] << 8); case 1: a += k[0]; /* case 0: nothing left to add */ } #endif /* WORDS_BIGENDIAN */ } final_new(a, b, c); /* report the result */ return c; } /* * Get string data from Python object. */ static Py_ssize_t get_buffer(PyObject *obj, unsigned char **buf_p, PyObject **tmp_obj_p) { PyBufferProcs *bfp; PyObject *str = NULL; Py_ssize_t res; /* check for None */ if (obj == Py_None) { PyErr_Format(PyExc_TypeError, "None is not allowed"); return -1; } /* is string or unicode ? */ if (PyString_Check(obj) || PyUnicode_Check(obj)) { if (PyString_AsStringAndSize(obj, (char**)buf_p, &res) < 0) return -1; return res; } /* try to get buffer */ bfp = obj->ob_type->tp_as_buffer; if (bfp && bfp->bf_getsegcount && bfp->bf_getreadbuffer) { if (bfp->bf_getsegcount(obj, NULL) == 1) return bfp->bf_getreadbuffer(obj, 0, (void**)buf_p); } /* * Not a string-like object, run str() or it. */ /* are we in recursion? 
*/ if (tmp_obj_p == NULL) { PyErr_Format(PyExc_TypeError, "Cannot convert to string - get_buffer() recusively failed"); return -1; } /* do str() then */ str = PyObject_Str(obj); res = -1; if (str != NULL) { res = get_buffer(str, buf_p, NULL); if (res >= 0) { *tmp_obj_p = str; } else { Py_CLEAR(str); } } return res; } /* * Common argument parsing. */ static PyObject *run_hash(PyObject *args, hash_fn_t real_hash) { unsigned char *src = NULL; Py_ssize_t src_len; PyObject *arg, *strtmp = NULL; int32_t hash; if (!PyArg_ParseTuple(args, "O", &arg)) return NULL; src_len = get_buffer(arg, &src, &strtmp); if (src_len < 0) return NULL; hash = real_hash(src, src_len); Py_CLEAR(strtmp); return PyInt_FromLong(hash); } /* * Python wrappers around actual hash functions. */ static PyObject *hashtext_old(PyObject *self, PyObject *args) { return run_hash(args, hash_old_hashtext); } static PyObject *hashtext_new(PyObject *self, PyObject *args) { return run_hash(args, hash_new_hashtext); } /* * Module initialization */ static PyMethodDef methods[] = { { "hashtext_old", hashtext_old, METH_VARARGS, "Old Postgres hashtext().\n" }, { "hashtext_new", hashtext_new, METH_VARARGS, "New Postgres hashtext().\n" }, { NULL } }; PyMODINIT_FUNC init_chashtext(void) { PyObject *module; module = Py_InitModule("_chashtext", methods); PyModule_AddStringConstant(module, "__doc__", "String hash functions"); } skytools-3.2.6/python/modules/cquoting.c0000644000000000000000000004154312426435645015307 0ustar /* * Fast quoting functions for Python. */ #define PY_SSIZE_T_CLEAN #include #include #if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN) typedef int Py_ssize_t; #define PY_SSIZE_T_MAX INT_MAX #define PY_SSIZE_T_MIN INT_MIN #endif #ifdef _MSC_VER #define inline __inline #define strcasecmp stricmp #endif /* * Common buffer management. 
*/ struct Buf { unsigned char *ptr; unsigned long pos; unsigned long alloc; }; static unsigned char *buf_init(struct Buf *buf, unsigned init_size) { if (init_size < 256) init_size = 256; buf->ptr = PyMem_Malloc(init_size); if (buf->ptr) { buf->pos = 0; buf->alloc = init_size; } return buf->ptr; } /* return new pos */ static unsigned char *buf_enlarge(struct Buf *buf, unsigned need_room) { unsigned alloc = buf->alloc; unsigned need_size = buf->pos + need_room; unsigned char *ptr; /* no alloc needed */ if (need_size < alloc) return buf->ptr + buf->pos; if (alloc <= need_size / 2) alloc = need_size; else alloc = alloc * 2; ptr = PyMem_Realloc(buf->ptr, alloc); if (!ptr) return NULL; buf->ptr = ptr; buf->alloc = alloc; return buf->ptr + buf->pos; } static void buf_free(struct Buf *buf) { PyMem_Free(buf->ptr); buf->ptr = NULL; buf->pos = buf->alloc = 0; } static inline unsigned char *buf_get_target_for(struct Buf *buf, unsigned len) { if (buf->pos + len <= buf->alloc) return buf->ptr + buf->pos; else return buf_enlarge(buf, len); } static inline void buf_set_target(struct Buf *buf, unsigned char *newpos) { assert(buf->ptr + buf->pos <= newpos); assert(buf->ptr + buf->alloc >= newpos); buf->pos = newpos - buf->ptr; } static inline int buf_put(struct Buf *buf, unsigned char c) { if (buf->pos < buf->alloc) { buf->ptr[buf->pos++] = c; return 1; } else if (buf_enlarge(buf, 1)) { buf->ptr[buf->pos++] = c; return 1; } return 0; } static PyObject *buf_pystr(struct Buf *buf, unsigned start_pos, unsigned char *newpos) { PyObject *res; if (newpos) buf_set_target(buf, newpos); res = PyString_FromStringAndSize((char *)buf->ptr + start_pos, buf->pos - start_pos); buf_free(buf); return res; } /* * Get string data */ static Py_ssize_t get_buffer(PyObject *obj, unsigned char **buf_p, PyObject **tmp_obj_p) { PyBufferProcs *bfp; PyObject *str = NULL; Py_ssize_t res; /* check for None */ if (obj == Py_None) { PyErr_Format(PyExc_TypeError, "None is not allowed here"); return -1; } /* is 
string or unicode ? */ if (PyString_Check(obj) || PyUnicode_Check(obj)) { if (PyString_AsStringAndSize(obj, (char**)buf_p, &res) < 0) return -1; return res; } /* try to get buffer */ bfp = obj->ob_type->tp_as_buffer; if (bfp && bfp->bf_getsegcount && bfp->bf_getreadbuffer) { if (bfp->bf_getsegcount(obj, NULL) == 1) return bfp->bf_getreadbuffer(obj, 0, (void**)buf_p); } /* * Not a string-like object, run str() or it. */ /* are we in recursion? */ if (tmp_obj_p == NULL) { PyErr_Format(PyExc_TypeError, "Cannot convert to string - get_buffer() recusively failed"); return -1; } /* do str() then */ str = PyObject_Str(obj); res = -1; if (str != NULL) { res = get_buffer(str, buf_p, NULL); if (res >= 0) { *tmp_obj_p = str; } else { Py_CLEAR(str); } } return res; } /* * Common argument parsing. */ typedef PyObject *(*quote_fn)(unsigned char *src, Py_ssize_t src_len); static PyObject *common_quote(PyObject *args, quote_fn qfunc) { unsigned char *src = NULL; Py_ssize_t src_len = 0; PyObject *arg, *res, *strtmp = NULL; if (!PyArg_ParseTuple(args, "O", &arg)) return NULL; if (arg != Py_None) { src_len = get_buffer(arg, &src, &strtmp); if (src_len < 0) return NULL; } res = qfunc(src, src_len); Py_CLEAR(strtmp); return res; } /* * Simple quoting functions. 
*/ static const char doc_quote_literal[] = "Quote a literal value for SQL.\n" "\n" "If string contains '\\', it is quoted and result is prefixed with E.\n" "Input value of None results in string \"null\" without quotes.\n" "\n" "C implementation.\n"; static PyObject *quote_literal_body(unsigned char *src, Py_ssize_t src_len) { struct Buf buf; unsigned char *esc, *dst, *src_end = src + src_len; unsigned int start_ofs = 1; if (src == NULL) return PyString_FromString("null"); esc = dst = buf_init(&buf, src_len * 2 + 2 + 1); if (!dst) return NULL; *dst++ = ' '; *dst++ = '\''; while (src < src_end) { if (*src == '\\') { *dst++ = '\\'; start_ofs = 0; } else if (*src == '\'') { *dst++ = '\''; } *dst++ = *src++; } *dst++ = '\''; if (start_ofs == 0) *esc = 'E'; return buf_pystr(&buf, start_ofs, dst); } static PyObject *quote_literal(PyObject *self, PyObject *args) { return common_quote(args, quote_literal_body); } /* COPY field */ static const char doc_quote_copy[] = "Quoting for COPY data. None is converted to \\N.\n\n" "C implementation."; static PyObject *quote_copy_body(unsigned char *src, Py_ssize_t src_len) { unsigned char *dst, *src_end = src + src_len; struct Buf buf; if (src == NULL) return PyString_FromString("\\N"); dst = buf_init(&buf, src_len * 2); if (!dst) return NULL; while (src < src_end) { switch (*src) { case '\t': *dst++ = '\\'; *dst++ = 't'; src++; break; case '\n': *dst++ = '\\'; *dst++ = 'n'; src++; break; case '\r': *dst++ = '\\'; *dst++ = 'r'; src++; break; case '\\': *dst++ = '\\'; *dst++ = '\\'; src++; break; default: *dst++ = *src++; break; } } return buf_pystr(&buf, 0, dst); } static PyObject *quote_copy(PyObject *self, PyObject *args) { return common_quote(args, quote_copy_body); } /* raw bytea for byteain() */ static const char doc_quote_bytea_raw[] = "Quoting for bytea parser. 
Returns None as None.\n" "\n" "C implementation."; static PyObject *quote_bytea_raw_body(unsigned char *src, Py_ssize_t src_len) { unsigned char *dst, *src_end = src + src_len; struct Buf buf; if (src == NULL) { Py_INCREF(Py_None); return Py_None; } dst = buf_init(&buf, src_len * 4); if (!dst) return NULL; while (src < src_end) { if (*src < 0x20 || *src >= 0x7F) { *dst++ = '\\'; *dst++ = '0' + (*src >> 6); *dst++ = '0' + ((*src >> 3) & 7); *dst++ = '0' + (*src & 7); src++; } else { if (*src == '\\') *dst++ = '\\'; *dst++ = *src++; } } return buf_pystr(&buf, 0, dst); } static PyObject *quote_bytea_raw(PyObject *self, PyObject *args) { return common_quote(args, quote_bytea_raw_body); } /* SQL unquote */ static const char doc_unquote_literal[] = "Unquote SQL value.\n\n" "E'..' -> extended quoting.\n" "'..' -> standard or extended quoting\n" "null -> None\n" "other -> returned as-is\n\n" "C implementation.\n"; static PyObject *do_sql_ext(unsigned char *src, Py_ssize_t src_len) { unsigned char *dst, *src_end = src + src_len; struct Buf buf; dst = buf_init(&buf, src_len); if (!dst) return NULL; while (src < src_end) { if (*src == '\'') { src++; if (src < src_end && *src == '\'') { *dst++ = *src++; continue; } goto failed; } if (*src != '\\') { *dst++ = *src++; continue; } if (++src >= src_end) goto failed; switch (*src) { case 't': *dst++ = '\t'; src++; break; case 'n': *dst++ = '\n'; src++; break; case 'r': *dst++ = '\r'; src++; break; case 'a': *dst++ = '\a'; src++; break; case 'b': *dst++ = '\b'; src++; break; default: if (*src >= '0' && *src <= '7') { unsigned char c = *src++ - '0'; if (src < src_end && *src >= '0' && *src <= '7') { c = (c << 3) | ((*src++) - '0'); if (src < src_end && *src >= '0' && *src <= '7') c = (c << 3) | ((*src++) - '0'); } *dst++ = c; } else { *dst++ = *src++; } } } return buf_pystr(&buf, 0, dst); failed: PyErr_Format(PyExc_ValueError, "Broken exteded SQL string"); return NULL; } static PyObject *do_sql_std(unsigned char *src, Py_ssize_t 
src_len) { unsigned char *dst, *src_end = src + src_len; struct Buf buf; dst = buf_init(&buf, src_len); if (!dst) return NULL; while (src < src_end) { if (*src != '\'') { *dst++ = *src++; continue; } src++; if (src >= src_end || *src != '\'') goto failed; *dst++ = *src++; } return buf_pystr(&buf, 0, dst); failed: PyErr_Format(PyExc_ValueError, "Broken standard SQL string"); return NULL; } static PyObject *do_dolq(unsigned char *src, Py_ssize_t src_len) { /* src_len >= 2, '$' in start and end */ unsigned char *src_end = src + src_len; unsigned char *p1 = src + 1, *p2 = src_end - 2; while (p1 < src_end && *p1 != '$') p1++; while (p2 > src && *p2 != '$') p2--; if (p2 <= p1) goto failed; p1++; /* position after '$' */ if ((p1 - src) != (src_end - p2)) goto failed; if (memcmp(src, p2, p1 - src) != 0) goto failed; return PyString_FromStringAndSize((char *)p1, p2 - p1); failed: PyErr_Format(PyExc_ValueError, "Broken dollar-quoted string"); return NULL; } static PyObject *unquote_literal(PyObject *self, PyObject *args) { unsigned char *src = NULL; Py_ssize_t src_len = 0; int stdstr = 0; PyObject *value = NULL; if (!PyArg_ParseTuple(args, "O|i", &value, &stdstr)) return NULL; if (PyString_AsStringAndSize(value, (char **)&src, &src_len) < 0) return NULL; if (src_len == 4 && strcasecmp((char *)src, "null") == 0) { Py_INCREF(Py_None); return Py_None; } if (src_len >= 2 && src[0] == '$' && src[src_len - 1] == '$') return do_dolq(src, src_len); if (src_len < 2 || src[src_len - 1] != '\'') goto badstr; if (src[0] == '\'') { src++; src_len -= 2; return stdstr ? 
do_sql_std(src, src_len) : do_sql_ext(src, src_len); } else if (src_len > 2 && (src[0] | 0x20) == 'e' && src[1] == '\'') { src += 2; src_len -= 3; return do_sql_ext(src, src_len); } badstr: Py_INCREF(value); return value; } /* C unescape */ static const char doc_unescape[] = "Unescape C-style escaped string.\n\n" "C implementation."; static PyObject *unescape_body(unsigned char *src, Py_ssize_t src_len) { unsigned char *dst, *src_end = src + src_len; struct Buf buf; if (src == NULL) { PyErr_Format(PyExc_TypeError, "None not allowed"); return NULL; } dst = buf_init(&buf, src_len); if (!dst) return NULL; while (src < src_end) { if (*src != '\\') { *dst++ = *src++; continue; } if (++src >= src_end) goto failed; switch (*src) { case 't': *dst++ = '\t'; src++; break; case 'n': *dst++ = '\n'; src++; break; case 'r': *dst++ = '\r'; src++; break; case 'a': *dst++ = '\a'; src++; break; case 'b': *dst++ = '\b'; src++; break; default: if (*src >= '0' && *src <= '7') { unsigned char c = *src++ - '0'; if (src < src_end && *src >= '0' && *src <= '7') { c = (c << 3) | ((*src++) - '0'); if (src < src_end && *src >= '0' && *src <= '7') c = (c << 3) | ((*src++) - '0'); } *dst++ = c; } else { *dst++ = *src++; } } } return buf_pystr(&buf, 0, dst); failed: PyErr_Format(PyExc_ValueError, "Broken string - \\ at the end"); return NULL; } static PyObject *unescape(PyObject *self, PyObject *args) { return common_quote(args, unescape_body); } /* * urlencode of dict */ static bool urlenc(struct Buf *buf, PyObject *obj) { Py_ssize_t len; unsigned char *src, *dst; PyObject *strtmp = NULL; static const unsigned char hextbl[] = "0123456789abcdef"; bool ok = false; len = get_buffer(obj, &src, &strtmp); if (len < 0) goto failed; dst = buf_get_target_for(buf, len * 3); if (!dst) goto failed; while (len--) { if ((*src >= 'a' && *src <= 'z') || (*src >= 'A' && *src <= 'Z') || (*src >= '0' && *src <= '9') || (*src == '.' 
|| *src == '_' || *src == '-')) { *dst++ = *src++; } else if (*src == ' ') { *dst++ = '+'; src++; } else { *dst++ = '%'; *dst++ = hextbl[*src >> 4]; *dst++ = hextbl[*src & 0xF]; src++; } } buf_set_target(buf, dst); ok = true; failed: Py_CLEAR(strtmp); return ok; } /* urlencode key+val pair. val can be None */ static bool urlenc_keyval(struct Buf *buf, PyObject *key, PyObject *value, bool needAmp) { if (needAmp && !buf_put(buf, '&')) return false; if (!urlenc(buf, key)) return false; if (value != Py_None) { if (!buf_put(buf, '=')) return false; if (!urlenc(buf, value)) return false; } return true; } /* encode native dict using PyDict_Next */ static PyObject *encode_dict(PyObject *data) { PyObject *key, *value; Py_ssize_t pos = 0; bool needAmp = false; struct Buf buf; if (!buf_init(&buf, 1024)) return NULL; while (PyDict_Next(data, &pos, &key, &value)) { if (!urlenc_keyval(&buf, key, value, needAmp)) goto failed; needAmp = true; } return buf_pystr(&buf, 0, NULL); failed: buf_free(&buf); return NULL; } /* encode custom object using .iteritems() */ static PyObject *encode_dictlike(PyObject *data) { PyObject *key = NULL, *value = NULL, *tup, *iter; struct Buf buf; bool needAmp = false; if (!buf_init(&buf, 1024)) return NULL; iter = PyObject_CallMethod(data, "iteritems", NULL); if (iter == NULL) { buf_free(&buf); return NULL; } while ((tup = PyIter_Next(iter))) { key = PySequence_GetItem(tup, 0); value = key ? 
PySequence_GetItem(tup, 1) : NULL; Py_CLEAR(tup); if (!key || !value) goto failed; if (!urlenc_keyval(&buf, key, value, needAmp)) goto failed; needAmp = true; Py_CLEAR(key); Py_CLEAR(value); } /* allow error from iterator */ if (PyErr_Occurred()) goto failed; Py_CLEAR(iter); return buf_pystr(&buf, 0, NULL); failed: buf_free(&buf); Py_CLEAR(iter); Py_CLEAR(key); Py_CLEAR(value); return NULL; } static const char doc_db_urlencode[] = "Urlencode for database records.\n" "If a value is None the key is output without '='.\n" "\n" "C implementation."; static PyObject *db_urlencode(PyObject *self, PyObject *args) { PyObject *data; if (!PyArg_ParseTuple(args, "O", &data)) return NULL; if (PyDict_Check(data)) { return encode_dict(data); } else { return encode_dictlike(data); } } /* * urldecode to dict */ static inline int gethex(unsigned char c) { if (c >= '0' && c <= '9') return c - '0'; c |= 0x20; if (c >= 'a' && c <= 'f') return c - 'a' + 10; return -1; } static PyObject *get_elem(unsigned char *buf, unsigned char **src_p, unsigned char *src_end) { int c1, c2; unsigned char *src = *src_p; unsigned char *dst = buf; while (src < src_end) { switch (*src) { case '%': if (++src + 2 > src_end) goto hex_incomplete; if ((c1 = gethex(*src++)) < 0) goto hex_invalid; if ((c2 = gethex(*src++)) < 0) goto hex_invalid; *dst++ = (c1 << 4) | c2; break; case '+': *dst++ = ' '; src++; break; case '&': case '=': goto gotit; default: *dst++ = *src++; } } gotit: *src_p = src; return PyString_FromStringAndSize((char *)buf, dst - buf); hex_incomplete: PyErr_Format(PyExc_ValueError, "Incomplete hex code"); return NULL; hex_invalid: PyErr_Format(PyExc_ValueError, "Invalid hex code"); return NULL; } static const char doc_db_urldecode[] = "Urldecode from string to dict.\n" "NULL are detected by missing '='.\n" "Duplicate keys are ignored - only latest is kept.\n" "\n" "C implementation."; static PyObject *db_urldecode(PyObject *self, PyObject *args) { unsigned char *src, *src_end; Py_ssize_t 
src_len; PyObject *dict = NULL, *key = NULL, *value = NULL; struct Buf buf; if (!PyArg_ParseTuple(args, "t#", &src, &src_len)) return NULL; if (!buf_init(&buf, src_len)) return NULL; dict = PyDict_New(); if (!dict) { buf_free(&buf); return NULL; } src_end = src + src_len; while (src < src_end) { if (*src == '&') { src++; continue; } key = get_elem(buf.ptr, &src, src_end); if (!key) goto failed; if (src < src_end && *src == '=') { src++; value = get_elem(buf.ptr, &src, src_end); if (value == NULL) goto failed; } else { Py_INCREF(Py_None); value = Py_None; } /* lessen memory usage by intering */ PyString_InternInPlace(&key); if (PyDict_SetItem(dict, key, value) < 0) goto failed; Py_CLEAR(key); Py_CLEAR(value); } buf_free(&buf); return dict; failed: buf_free(&buf); Py_CLEAR(key); Py_CLEAR(value); Py_CLEAR(dict); return NULL; } /* * Module initialization */ static PyMethodDef cquoting_methods[] = { { "quote_literal", quote_literal, METH_VARARGS, doc_quote_literal }, { "quote_copy", quote_copy, METH_VARARGS, doc_quote_copy }, { "quote_bytea_raw", quote_bytea_raw, METH_VARARGS, doc_quote_bytea_raw }, { "unescape", unescape, METH_VARARGS, doc_unescape }, { "db_urlencode", db_urlencode, METH_VARARGS, doc_db_urlencode }, { "db_urldecode", db_urldecode, METH_VARARGS, doc_db_urldecode }, { "unquote_literal", unquote_literal, METH_VARARGS, doc_unquote_literal }, { NULL } }; PyMODINIT_FUNC init_cquoting(void) { PyObject *module; module = Py_InitModule("_cquoting", cquoting_methods); PyModule_AddStringConstant(module, "__doc__", "fast quoting for skytools"); } skytools-3.2.6/python/skytools/0000755000000000000000000000000012426435645013522 5ustar skytools-3.2.6/python/skytools/apipkg.py0000644000000000000000000001264512426435645015357 0ustar """ apipkg: control the exported namespace of a python package. 
see http://pypi.python.org/pypi/apipkg (c) holger krekel, 2009 - MIT license """ import os import sys from types import ModuleType __version__ = '1.2.dev6' def initpkg(pkgname, exportdefs, attr=dict()): """ initialize given package from the export definitions. """ oldmod = sys.modules.get(pkgname) d = {} f = getattr(oldmod, '__file__', None) if f: f = os.path.abspath(f) d['__file__'] = f if hasattr(oldmod, '__version__'): d['__version__'] = oldmod.__version__ if hasattr(oldmod, '__loader__'): d['__loader__'] = oldmod.__loader__ if hasattr(oldmod, '__path__'): d['__path__'] = [os.path.abspath(p) for p in oldmod.__path__] if '__doc__' not in exportdefs and getattr(oldmod, '__doc__', None): d['__doc__'] = oldmod.__doc__ d.update(attr) if hasattr(oldmod, "__dict__"): oldmod.__dict__.update(d) mod = ApiModule(pkgname, exportdefs, implprefix=pkgname, attr=d) sys.modules[pkgname] = mod def importobj(modpath, attrname): module = __import__(modpath, None, None, ['__doc__']) if not attrname: return module retval = module names = attrname.split(".") for x in names: retval = getattr(retval, x) return retval class ApiModule(ModuleType): def __docget(self): try: return self.__doc except AttributeError: if '__doc__' in self.__map__: return self.__makeattr('__doc__') def __docset(self, value): self.__doc = value __doc__ = property(__docget, __docset) def __init__(self, name, importspec, implprefix=None, attr=None): self.__name__ = name self.__all__ = [x for x in importspec if x != '__onfirstaccess__'] self.__map__ = {} self.__implprefix__ = implprefix or name if attr: for name, val in attr.items(): #print "setting", self.__name__, name, val setattr(self, name, val) for name, importspec in importspec.items(): if isinstance(importspec, dict): subname = '%s.%s'%(self.__name__, name) apimod = ApiModule(subname, importspec, implprefix) sys.modules[subname] = apimod setattr(self, name, apimod) else: parts = importspec.split(':') modpath = parts.pop(0) attrname = parts and parts[0] or "" 
if modpath[0] == '.': modpath = implprefix + modpath if not attrname: subname = '%s.%s'%(self.__name__, name) apimod = AliasModule(subname, modpath) sys.modules[subname] = apimod if '.' not in name: setattr(self, name, apimod) else: self.__map__[name] = (modpath, attrname) def __repr__(self): l = [] if hasattr(self, '__version__'): l.append("version=" + repr(self.__version__)) if hasattr(self, '__file__'): l.append('from ' + repr(self.__file__)) if l: return '' % (self.__name__, " ".join(l)) return '' % (self.__name__,) def __makeattr(self, name): """lazily compute value for name or raise AttributeError if unknown.""" #print "makeattr", self.__name__, name target = None if '__onfirstaccess__' in self.__map__: target = self.__map__.pop('__onfirstaccess__') importobj(*target)() try: modpath, attrname = self.__map__[name] except KeyError: if target is not None and name != '__onfirstaccess__': # retry, onfirstaccess might have set attrs return getattr(self, name) raise AttributeError(name) else: result = importobj(modpath, attrname) setattr(self, name, result) try: del self.__map__[name] except KeyError: pass # in a recursive-import situation a double-del can happen return result __getattr__ = __makeattr def __dict__(self): # force all the content of the module to be loaded when __dict__ is read dictdescr = ModuleType.__dict__['__dict__'] dict = dictdescr.__get__(self) if dict is not None: hasattr(self, 'some') for name in self.__all__: try: self.__makeattr(name) except AttributeError: pass return dict __dict__ = property(__dict__) def AliasModule(modname, modpath, attrname=None): mod = [] def getmod(): if not mod: x = importobj(modpath, None) if attrname is not None: x = getattr(x, attrname) mod.append(x) return mod[0] class AliasModule(ModuleType): def __repr__(self): x = modpath if attrname: x += "." 
+ attrname return '' % (modname, x) def __getattribute__(self, name): return getattr(getmod(), name) def __setattr__(self, name, value): setattr(getmod(), name, value) def __delattr__(self, name): delattr(getmod(), name) return AliasModule(modname) skytools-3.2.6/python/skytools/dbstruct.py0000644000000000000000000005500112426435645015727 0ustar """Find table structure and allow CREATE/DROP elements from it. """ import re import skytools from skytools import quote_ident, quote_fqident __all__ = ['TableStruct', 'SeqStruct', 'T_TABLE', 'T_CONSTRAINT', 'T_INDEX', 'T_TRIGGER', 'T_RULE', 'T_GRANT', 'T_OWNER', 'T_PKEY', 'T_ALL', 'T_SEQUENCE', 'T_PARENT', 'T_DEFAULT'] T_TABLE = 1 << 0 T_CONSTRAINT = 1 << 1 T_INDEX = 1 << 2 T_TRIGGER = 1 << 3 T_RULE = 1 << 4 T_GRANT = 1 << 5 T_OWNER = 1 << 6 T_SEQUENCE = 1 << 7 T_PARENT = 1 << 8 T_DEFAULT = 1 << 9 T_PKEY = 1 << 20 # special, one of constraints T_ALL = ( T_TABLE | T_CONSTRAINT | T_INDEX | T_SEQUENCE | T_TRIGGER | T_RULE | T_GRANT | T_OWNER | T_DEFAULT ) # # Utility functions # def find_new_name(curs, name): """Create new object name for case the old exists. Needed when creating a new table besides old one. 
""" # cut off previous numbers m = re.search('_[0-9]+$', name) if m: name = name[:m.start()] # now loop for i in range(1, 1000): tname = "%s_%d" % (name, i) q = "select count(1) from pg_class where relname = %s" curs.execute(q, [tname]) if curs.fetchone()[0] == 0: return tname # failed raise Exception('find_new_name failed') def rx_replace(rx, sql, new_part): """Find a regex match and replace that part with new_part.""" m = re.search(rx, sql, re.I) if not m: raise Exception('rx_replace failed: rx=%r sql=%r new=%r' % (rx, sql, new_part)) p1 = sql[:m.start()] p2 = sql[m.end():] return p1 + new_part + p2 # # Schema objects # class TElem(object): """Keeps info about one metadata object.""" SQL = "" type = 0 def get_create_sql(self, curs, new_name = None): """Return SQL statement for creating or None if not supported.""" return None def get_drop_sql(self, curs): """Return SQL statement for dropping or None of not supported.""" return None @classmethod def get_load_sql(cls, pgver): """Return SQL statement for finding objects.""" return cls.SQL class TConstraint(TElem): """Info about constraint.""" type = T_CONSTRAINT SQL = """ SELECT c.conname as name, pg_get_constraintdef(c.oid) as def, c.contype, i.indisclustered as is_clustered FROM pg_constraint c LEFT JOIN pg_index i ON c.conrelid = i.indrelid AND c.conname = (SELECT r.relname FROM pg_class r WHERE r.oid = i.indexrelid) WHERE c.conrelid = %(oid)s AND c.contype != 'f' """ def __init__(self, table_name, row): """Init constraint.""" self.table_name = table_name self.name = row['name'] self.defn = row['def'] self.contype = row['contype'] self.is_clustered = row['is_clustered'] # tag pkeys if self.contype == 'p': self.type += T_PKEY def get_create_sql(self, curs, new_table_name=None): """Generate creation SQL.""" # no ONLY here as table with childs (only case that matters) # cannot have contraints that childs do not have fmt = "ALTER TABLE %s ADD CONSTRAINT %s\n %s;" if new_table_name: name = self.name if self.contype in 
('p', 'u'): name = find_new_name(curs, self.name) qtbl = quote_fqident(new_table_name) qname = quote_ident(name) else: qtbl = quote_fqident(self.table_name) qname = quote_ident(self.name) sql = fmt % (qtbl, qname, self.defn) if self.is_clustered: sql +=' ALTER TABLE ONLY %s\n CLUSTER ON %s;' % (qtbl, qname) return sql def get_drop_sql(self, curs): """Generate removal sql.""" fmt = "ALTER TABLE ONLY %s\n DROP CONSTRAINT %s;" sql = fmt % (quote_fqident(self.table_name), quote_ident(self.name)) return sql class TIndex(TElem): """Info about index.""" type = T_INDEX SQL = """ SELECT n.nspname || '.' || c.relname as name, pg_get_indexdef(i.indexrelid) as defn, c.relname as local_name, i.indisclustered as is_clustered FROM pg_index i, pg_class c, pg_namespace n WHERE c.oid = i.indexrelid AND i.indrelid = %(oid)s AND n.oid = c.relnamespace AND NOT EXISTS (select objid from pg_depend where classid = %(pg_class_oid)s and objid = c.oid and deptype = 'i') """ def __init__(self, table_name, row): self.name = row['name'] self.defn = row['defn'].replace(' USING ', '\n USING ', 1) + ';' self.is_clustered = row['is_clustered'] self.table_name = table_name self.local_name = row['local_name'] def get_create_sql(self, curs, new_table_name = None): """Generate creation SQL.""" if new_table_name: # fixme: seems broken iname = find_new_name(curs, self.name) tname = new_table_name pnew = "INDEX %s ON %s " % (quote_ident(iname), quote_fqident(tname)) rx = r"\bINDEX[ ][a-z0-9._]+[ ]ON[ ][a-z0-9._]+[ ]" sql = rx_replace(rx, self.defn, pnew) else: sql = self.defn iname = self.local_name tname = self.table_name if self.is_clustered: sql += ' ALTER TABLE ONLY %s\n CLUSTER ON %s;' % ( quote_fqident(tname), quote_ident(iname)) return sql def get_drop_sql(self, curs): return 'DROP INDEX %s;' % quote_fqident(self.name) class TRule(TElem): """Info about rule.""" type = T_RULE SQL = """SELECT rw.*, pg_get_ruledef(rw.oid) as def FROM pg_rewrite rw WHERE rw.ev_class = %(oid)s AND rw.rulename <> 
'_RETURN'::name """ def __init__(self, table_name, row, new_name = None): self.table_name = table_name self.name = row['rulename'] self.defn = row['def'] self.enabled = row.get('ev_enabled', 'O') def get_create_sql(self, curs, new_table_name = None): """Generate creation SQL.""" if not new_table_name: sql = self.defn table = self.table_name else: idrx = r'''([a-z0-9._]+|"([^"]+|"")+")+''' # fixme: broken / quoting rx = r"\bTO[ ]" + idrx rc = re.compile(rx, re.X) m = rc.search(self.defn) if not m: raise Exception('Cannot find table name in rule') old_tbl = m.group(1) new_tbl = quote_fqident(new_table_name) sql = self.defn.replace(old_tbl, new_tbl) table = new_table_name if self.enabled != 'O': # O - rule fires in origin and local modes # D - rule is disabled # R - rule fires in replica mode # A - rule fires always action = {'R': 'ENABLE REPLICA', 'A': 'ENABLE ALWAYS', 'D': 'DISABLE'} [self.enabled] sql += ('\nALTER TABLE %s %s RULE %s;' % (table, action, self.name)) return sql def get_drop_sql(self, curs): return 'DROP RULE %s ON %s' % (quote_ident(self.name), quote_fqident(self.table_name)) class TTrigger(TElem): """Info about trigger.""" type = T_TRIGGER def __init__(self, table_name, row): self.table_name = table_name self.name = row['name'] self.defn = row['def'] + ';' self.defn = self.defn.replace('FOR EACH', '\n FOR EACH', 1) def get_create_sql(self, curs, new_table_name = None): """Generate creation SQL.""" if not new_table_name: return self.defn # fixme: broken / quoting rx = r"\bON[ ][a-z0-9._]+[ ]" pnew = "ON %s " % new_table_name return rx_replace(rx, self.defn, pnew) def get_drop_sql(self, curs): return 'DROP TRIGGER %s ON %s' % (quote_ident(self.name), quote_fqident(self.table_name)) @classmethod def get_load_sql(cls, pg_vers): """Return SQL statement for finding objects.""" sql = "SELECT tgname as name, pg_get_triggerdef(oid) as def "\ " FROM pg_trigger "\ " WHERE tgrelid = %(oid)s AND " if pg_vers >= 90000: sql += "NOT tgisinternal" else: sql += "NOT 
tgisconstraint" return sql class TParent(TElem): """Info about trigger.""" type = T_PARENT SQL = """ SELECT n.nspname||'.'||c.relname AS name FROM pg_inherits i JOIN pg_class c ON i.inhparent = c.oid JOIN pg_namespace n ON c.relnamespace = n.oid WHERE i.inhrelid = %(oid)s """ def __init__(self, table_name, row): self.name = table_name self.parent_name = row['name'] def get_create_sql(self, curs, new_table_name = None): return 'ALTER TABLE ONLY %s\n INHERIT %s' % (quote_fqident(self.name), quote_fqident(self.parent_name)) def get_drop_sql(self, curs): return 'ALTER TABLE ONLY %s\n NO INHERIT %s' % (quote_fqident(self.name), quote_fqident(self.parent_name)) class TOwner(TElem): """Info about table owner.""" type = T_OWNER SQL = """ SELECT pg_get_userbyid(relowner) as owner FROM pg_class WHERE oid = %(oid)s """ def __init__(self, table_name, row, new_name = None): self.table_name = table_name self.name = 'Owner' self.owner = row['owner'] def get_create_sql(self, curs, new_name = None): """Generate creation SQL.""" if not new_name: new_name = self.table_name return 'ALTER TABLE %s\n OWNER TO %s;' % (quote_fqident(new_name), quote_ident(self.owner)) class TGrant(TElem): """Info about permissions.""" type = T_GRANT SQL = "SELECT relacl FROM pg_class where oid = %(oid)s" # Sync with: src/include/utils/acl.h acl_map = { 'a': 'INSERT', 'r': 'SELECT', 'w': 'UPDATE', 'd': 'DELETE', 'D': 'TRUNCATE', 'x': 'REFERENCES', 't': 'TRIGGER', 'X': 'EXECUTE', 'U': 'USAGE', 'C': 'CREATE', 'T': 'TEMPORARY', 'c': 'CONNECT', # old 'R': 'RULE', } def acl_to_grants(self, acl): if acl == "arwdRxt": # ALL for tables return "ALL" i = 0 lst1 = [] lst2 = [] while i < len(acl): a = self.acl_map[acl[i]] if i+1 < len(acl) and acl[i+1] == '*': lst2.append(a) i += 2 else: lst1.append(a) i += 1 return ", ".join(lst1), ", ".join(lst2) def parse_relacl(self, relacl): """Parse ACL to tuple of (user, acl, who)""" if relacl is None: return [] tup_list = [] for sacl in skytools.parse_pgarray(relacl): acl = 
skytools.parse_acl(sacl) if not acl: continue tup_list.append(acl) return tup_list def __init__(self, table_name, row, new_name = None): self.name = table_name self.acl_list = self.parse_relacl(row['relacl']) def get_create_sql(self, curs, new_name = None): """Generate creation SQL.""" if not new_name: new_name = self.name qtarget = quote_fqident(new_name) sql_list = [] for role, acl, who in self.acl_list: qrole = quote_ident(role) astr1, astr2 = self.acl_to_grants(acl) if astr1: sql = "GRANT %s ON %s\n TO %s;" % (astr1, qtarget, qrole) sql_list.append(sql) if astr2: sql = "GRANT %s ON %s\n TO %s WITH GRANT OPTION;" % (astr2, qtarget, qrole) sql_list.append(sql) return "\n".join(sql_list) def get_drop_sql(self, curs): sql_list = [] for user, acl, who in self.acl_list: sql = "REVOKE ALL FROM %s ON %s;" % (quote_ident(user), quote_fqident(self.name)) sql_list.append(sql) return "\n".join(sql_list) class TColumnDefault(TElem): """Info about table column default value.""" type = T_DEFAULT SQL = """ select a.attname as name, pg_get_expr(d.adbin, d.adrelid) as expr from pg_attribute a left join pg_attrdef d on (d.adrelid = a.attrelid and d.adnum = a.attnum) where a.attrelid = %(oid)s and not a.attisdropped and a.atthasdef and a.attnum > 0 order by a.attnum; """ def __init__(self, table_name, row): self.table_name = table_name self.name = row['name'] self.expr = row['expr'] def get_create_sql(self, curs, new_name = None): """Generate creation SQL.""" tbl = new_name or self.table_name sql = "ALTER TABLE ONLY %s ALTER COLUMN %s\n SET DEFAULT %s;" % ( quote_fqident(tbl), quote_ident(self.name), self.expr) return sql def get_drop_sql(self, curs): return "ALTER TABLE %s ALTER COLUMN %s\n DROP DEFAULT;" % ( quote_fqident(self.table_name), quote_ident(self.name)) class TColumn(TElem): """Info about table column.""" SQL = """ select a.attname as name, quote_ident(a.attname) as qname, format_type(a.atttypid, a.atttypmod) as dtype, a.attnotnull, (select max(char_length(aa.attname)) 
from pg_attribute aa where aa.attrelid = %(oid)s) as maxcol, pg_get_serial_sequence(%(fq2name)s, a.attname) as seqname from pg_attribute a left join pg_attrdef d on (d.adrelid = a.attrelid and d.adnum = a.attnum) where a.attrelid = %(oid)s and not a.attisdropped and a.attnum > 0 order by a.attnum; """ seqname = None def __init__(self, table_name, row): self.name = row['name'] fname = row['qname'].ljust(row['maxcol'] + 3) self.column_def = fname + ' ' + row['dtype'] if row['attnotnull']: self.column_def += ' not null' self.sequence = None if row['seqname']: self.seqname = skytools.unquote_fqident(row['seqname']) class TGPDistKey(TElem): """Info about GreenPlum table distribution keys""" SQL = """ select a.attname as name from pg_attribute a, gp_distribution_policy p where p.localoid = %(oid)s and a.attrelid = %(oid)s and a.attnum = any(p.attrnums) order by a.attnum; """ def __init__(self, table_name, row): self.name = row['name'] class TTable(TElem): """Info about table only (columns).""" type = T_TABLE def __init__(self, table_name, col_list, dist_key_list = None): self.name = table_name self.col_list = col_list self.dist_key_list = dist_key_list def get_create_sql(self, curs, new_name = None): """Generate creation SQL.""" if not new_name: new_name = self.name sql = "CREATE TABLE %s (" % quote_fqident(new_name) sep = "\n " for c in self.col_list: sql += sep + c.column_def sep = ",\n " sql += "\n)" if self.dist_key_list is not None: if self.dist_key_list != []: sql += "\ndistributed by(%s)" % ','.join(c.name for c in self.dist_key_list) else: sql += '\ndistributed randomly' sql += ";" return sql def get_drop_sql(self, curs): return "DROP TABLE %s;" % quote_fqident(self.name) class TSeq(TElem): """Info about sequence.""" type = T_SEQUENCE SQL = """SELECT *, %(owner)s as "owner" from %(fqname)s """ def __init__(self, seq_name, row): self.name = seq_name defn = '' self.owner = row['owner'] if row['increment_by'] != 1: defn += ' INCREMENT BY %d' % row['increment_by'] if 
row['min_value'] != 1: defn += ' MINVALUE %d' % row['min_value'] if row['max_value'] != 9223372036854775807: defn += ' MAXVALUE %d' % row['max_value'] last_value = row['last_value'] if row['is_called']: last_value += row['increment_by'] if last_value >= row['max_value']: raise Exception('duh, seq passed max_value') if last_value != 1: defn += ' START %d' % last_value if row['cache_value'] != 1: defn += ' CACHE %d' % row['cache_value'] if row['is_cycled']: defn += ' CYCLE ' if self.owner: defn += ' OWNED BY %s' % self.owner self.defn = defn def get_create_sql(self, curs, new_seq_name = None): """Generate creation SQL.""" # we are in table def, forget full def if self.owner: sql = "ALTER SEQUENCE %s\n OWNED BY %s;" % ( quote_fqident(self.name), self.owner ) return sql name = self.name if new_seq_name: name = new_seq_name sql = 'CREATE SEQUENCE %s %s;' % (quote_fqident(name), self.defn) return sql def get_drop_sql(self, curs): if self.owner: return '' return 'DROP SEQUENCE %s;' % quote_fqident(self.name) # # Main table object, loads all the others # class BaseStruct(object): """Collects and manages all info about a higher-level db object. Allow to issue CREATE/DROP statements about any group of elements. """ object_list = [] def __init__(self, curs, name): """Initializes class by loading info about table_name from database.""" self.name = name self.fqname = quote_fqident(name) def _load_elem(self, curs, name, args, eclass): """Fetch element(s) from db.""" elem_list = [] #print "Loading %s, name=%s, args=%s" % (repr(eclass), repr(name), repr(args)) sql = eclass.get_load_sql(curs.connection.server_version) curs.execute(sql % args) for row in curs.fetchall(): elem_list.append(eclass(name, row)) return elem_list def create(self, curs, objs, new_table_name = None, log = None): """Issues CREATE statements for requested set of objects. If new_table_name is giver, creates table under that name and also tries to rename all indexes/constraints that conflict with existing table. 
""" for o in self.object_list: if o.type & objs: sql = o.get_create_sql(curs, new_table_name) if not sql: continue if log: log.info('Creating %s' % o.name) log.debug(sql) curs.execute(sql) def drop(self, curs, objs, log = None): """Issues DROP statements for requested set of objects.""" # make sure the creating & dropping happen in reverse order olist = self.object_list[:] olist.reverse() for o in olist: if o.type & objs: sql = o.get_drop_sql(curs) if not sql: continue if log: log.info('Dropping %s' % o.name) log.debug(sql) curs.execute(sql) def get_create_sql(self, objs): res = [] for o in self.object_list: if o.type & objs: sql = o.get_create_sql(None, None) if sql: res.append(sql) return "".join(res) class TableStruct(BaseStruct): """Collects and manages all info about table. Allow to issue CREATE/DROP statements about any group of elements. """ def __init__(self, curs, table_name): """Initializes class by loading info about table_name from database.""" BaseStruct.__init__(self, curs, table_name) self.table_name = table_name # fill args schema, name = skytools.fq_name_parts(table_name) args = { 'schema': schema, 'table': name, 'fqname': self.fqname, 'fq2name': skytools.quote_literal(self.fqname), 'oid': skytools.get_table_oid(curs, table_name), 'pg_class_oid': skytools.get_table_oid(curs, 'pg_catalog.pg_class'), } # load table struct self.col_list = self._load_elem(curs, self.name, args, TColumn) # if db is GP then read also table distribution keys if skytools.exists_table(curs, "pg_catalog.gp_distribution_policy"): self.dist_key_list = self._load_elem(curs, self.name, args, TGPDistKey) else: self.dist_key_list = None self.object_list = [ TTable(table_name, self.col_list, self.dist_key_list) ] self.seq_list = [] # load seqs for col in self.col_list: if col.seqname: fqname = quote_fqident(col.seqname) owner = self.fqname + '.' 
+ quote_ident(col.name) seq_args = { 'fqname': fqname, 'owner': skytools.quote_literal(owner) } self.seq_list += self._load_elem(curs, col.seqname, seq_args, TSeq) self.object_list += self.seq_list # load additional objects to_load = [TColumnDefault, TConstraint, TIndex, TTrigger, TRule, TGrant, TOwner, TParent] for eclass in to_load: self.object_list += self._load_elem(curs, self.name, args, eclass) def get_column_list(self): """Returns list of column names the table has.""" res = [] for c in self.col_list: res.append(c.name) return res class SeqStruct(BaseStruct): """Collects and manages all info about sequence. Allow to issue CREATE/DROP statements about any group of elements. """ def __init__(self, curs, seq_name): """Initializes class by loading info about table_name from database.""" BaseStruct.__init__(self, curs, seq_name) # fill args args = { 'fqname': self.fqname, 'owner': 'null' } # load table struct self.object_list = self._load_elem(curs, seq_name, args, TSeq) def test(): from skytools import connect_database db = connect_database("dbname=fooz") curs = db.cursor() s = TableStruct(curs, "public.data1") s.drop(curs, T_ALL) s.create(curs, T_ALL) s.create(curs, T_ALL, "data1_new") s.create(curs, T_PKEY) if __name__ == '__main__': test() skytools-3.2.6/python/skytools/skylog.py0000644000000000000000000003226512426435645015414 0ustar """Our log handlers for Python's logging package. 
""" import logging import logging.handlers import os import socket import time import skytools # use fast implementation if available, otherwise fall back to reference one try: import tnetstring as tnetstrings tnetstrings.parse = tnetstrings.pop except ImportError: import skytools.tnetstrings as tnetstrings tnetstrings.dumps = tnetstrings.dump __all__ = ['getLogger'] # add TRACE level TRACE = 5 logging.TRACE = TRACE logging.addLevelName(TRACE, 'TRACE') # extra info to be added to each log record _service_name = 'unknown_svc' _job_name = 'unknown_job' _hostname = socket.gethostname() try: _hostaddr = socket.gethostbyname(_hostname) except: _hostaddr = "0.0.0.0" _log_extra = { 'job_name': _job_name, 'service_name': _service_name, 'hostname': _hostname, 'hostaddr': _hostaddr, } def set_service_name(service_name, job_name): """Set info about current script.""" global _service_name, _job_name _service_name = service_name _job_name = job_name _log_extra['job_name'] = _job_name _log_extra['service_name'] = _service_name # # How to make extra fields available to all log records: # 1. Use own getLogger() # - messages logged otherwise (eg. from some libs) # will crash the logging. # 2. Fix record in own handlers # - works only with custom handlers, standard handlers will # crash is used with custom fmt string. # 3. Change root logger # - can't do it after non-root loggers are initialized, # doing it before will depend on import order. # 4. Update LogRecord.__dict__ # - fails, as formatter uses obj.__dict__ directly. # 5. Change LogRecord class # - ugly but seems to work. 
# _OldLogRecord = logging.LogRecord class _NewLogRecord(_OldLogRecord): def __init__(self, *args): _OldLogRecord.__init__(self, *args) self.__dict__.update(_log_extra) logging.LogRecord = _NewLogRecord # configurable file logger class EasyRotatingFileHandler(logging.handlers.RotatingFileHandler): """Easier setup for RotatingFileHandler.""" def __init__(self, filename, maxBytes = 10*1024*1024, backupCount = 3): """Args same as for RotatingFileHandler, but in filename '~' is expanded.""" fn = os.path.expanduser(filename) logging.handlers.RotatingFileHandler.__init__(self, fn, maxBytes=maxBytes, backupCount=backupCount) # send JSON message over UDP class UdpLogServerHandler(logging.handlers.DatagramHandler): """Sends log records over UDP to logserver in JSON format.""" # map logging levels to logserver levels _level_map = { logging.DEBUG : 'DEBUG', logging.INFO : 'INFO', logging.WARNING : 'WARN', logging.ERROR : 'ERROR', logging.CRITICAL: 'FATAL', } # JSON message template _log_template = '{\n\t'\ '"logger": "skytools.UdpLogServer",\n\t'\ '"timestamp": %.0f,\n\t'\ '"level": "%s",\n\t'\ '"thread": null,\n\t'\ '"message": %s,\n\t'\ '"properties": {"application":"%s", "apptype": "%s", "type": "sys", "hostname":"%s", "hostaddr": "%s"}\n'\ '}\n' # cut longer msgs MAXMSG = 1024 def makePickle(self, record): """Create message in JSON format.""" # get & cut msg msg = self.format(record) if len(msg) > self.MAXMSG: msg = msg[:self.MAXMSG] txt_level = self._level_map.get(record.levelno, "ERROR") hostname = _hostname hostaddr = _hostaddr jobname = _job_name svcname = _service_name pkt = self._log_template % (time.time()*1000, txt_level, skytools.quote_json(msg), jobname, svcname, hostname, hostaddr) return pkt def send(self, s): """Disable socket caching.""" sock = self.makeSocket() sock.sendto(s, (self.host, self.port)) sock.close() # send TNetStrings message over UDP class UdpTNetStringsHandler(logging.handlers.DatagramHandler): """ Sends log records in TNetStrings format over 
UDP. """ # LogRecord fields to send send_fields = [ 'created', 'exc_text', 'levelname', 'levelno', 'message', 'msecs', 'name', 'hostaddr', 'hostname', 'job_name', 'service_name'] _udp_reset = 0 def makePickle(self, record): """ Create message in TNetStrings format. """ msg = {} self.format(record) # render 'message' attribute and others for k in self.send_fields: msg[k] = record.__dict__[k] tnetstr = tnetstrings.dumps(msg) return tnetstr def send(self, s): """ Cache socket for a moment, then recreate it. """ now = time.time() if now - 1 > self._udp_reset: if self.sock: self.sock.close() self.sock = self.makeSocket() self._udp_reset = now self.sock.sendto(s, (self.host, self.port)) class LogDBHandler(logging.handlers.SocketHandler): """Sends log records into PostgreSQL server. Additionally, does some statistics aggregating, to avoid overloading log server. It subclasses SocketHandler to get throtthling for failed connections. """ # map codes to string _level_map = { logging.DEBUG : 'DEBUG', logging.INFO : 'INFO', logging.WARNING : 'WARNING', logging.ERROR : 'ERROR', logging.CRITICAL: 'FATAL', } def __init__(self, connect_string): """ Initializes the handler with a specific connection string. """ logging.handlers.SocketHandler.__init__(self, None, None) self.closeOnError = 1 self.connect_string = connect_string self.stat_cache = {} self.stat_flush_period = 60 # send first stat line immidiately self.last_stat_flush = 0 def createSocket(self): try: logging.handlers.SocketHandler.createSocket(self) except: self.sock = self.makeSocket() def makeSocket(self): """Create server connection. 
In this case its not socket but database connection.""" db = skytools.connect_database(self.connect_string) db.set_isolation_level(0) # autocommit return db def emit(self, record): """Process log record.""" # we do not want log debug messages if record.levelno < logging.INFO: return try: self.process_rec(record) except (SystemExit, KeyboardInterrupt): raise except: self.handleError(record) def process_rec(self, record): """Aggregate stats if needed, and send to logdb.""" # render msg msg = self.format(record) # dont want to send stats too ofter if record.levelno == logging.INFO and msg and msg[0] == "{": self.aggregate_stats(msg) if time.time() - self.last_stat_flush >= self.stat_flush_period: self.flush_stats(_job_name) return if record.levelno < logging.INFO: self.flush_stats(_job_name) # dont send more than one line ln = msg.find('\n') if ln > 0: msg = msg[:ln] txt_level = self._level_map.get(record.levelno, "ERROR") self.send_to_logdb(_job_name, txt_level, msg) def aggregate_stats(self, msg): """Sum stats together, to lessen load on logdb.""" msg = msg[1:-1] for rec in msg.split(", "): k, v = rec.split(": ") agg = self.stat_cache.get(k, 0) if v.find('.') >= 0: agg += float(v) else: agg += int(v) self.stat_cache[k] = agg def flush_stats(self, service): """Send acquired stats to logdb.""" res = [] for k, v in self.stat_cache.items(): res.append("%s: %s" % (k, str(v))) if len(res) > 0: logmsg = "{%s}" % ", ".join(res) self.send_to_logdb(service, "INFO", logmsg) self.stat_cache = {} self.last_stat_flush = time.time() def send_to_logdb(self, service, type, msg): """Actual sending is done here.""" if self.sock is None: self.createSocket() if self.sock: logcur = self.sock.cursor() query = "select * from log.add(%s, %s, %s)" logcur.execute(query, [type, service, msg]) # fix unicode bug in SysLogHandler class SysLogHandler(logging.handlers.SysLogHandler): """Fixes unicode bug in logging.handlers.SysLogHandler.""" # be compatible with both 2.6 and 2.7 socktype = 
socket.SOCK_DGRAM _udp_reset = 0 def _custom_format(self, record): msg = self.format(record) + '\000' """ We need to convert record level to lowercase, maybe this will change in the future. """ prio = '<%d>' % self.encodePriority(self.facility, self.mapPriority(record.levelname)) msg = prio + msg return msg def emit(self, record): """ Emit a record. The record is formatted, and then sent to the syslog server. If exception information is present, it is NOT sent to the server. """ msg = self._custom_format(record) # Message is a string. Convert to bytes as required by RFC 5424 if type(msg) is unicode: msg = msg.encode('utf-8') ## this puts BOM in wrong place #if codecs: # msg = codecs.BOM_UTF8 + msg try: if self.unixsocket: try: self.socket.send(msg) except socket.error: self._connect_unixsocket(self.address) self.socket.send(msg) elif self.socktype == socket.SOCK_DGRAM: now = time.time() if now - 1 > self._udp_reset: self.socket.close() self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self._udp_reset = now self.socket.sendto(msg, self.address) else: self.socket.sendall(msg) except (KeyboardInterrupt, SystemExit): raise except: self.handleError(record) class SysLogHostnameHandler(SysLogHandler): """Slightly modified standard SysLogHandler - sends also hostname and service type""" def _custom_format(self, record): msg = self.format(record) format_string = '<%d> %s %s %s\000' msg = format_string % (self.encodePriority(self.facility,self.mapPriority(record.levelname)), _hostname, _service_name, msg) return msg try: from logging import LoggerAdapter except ImportError: # LoggerAdapter is missing from python 2.5 class LoggerAdapter(object): def __init__(self, logger, extra): self.logger = logger self.extra = extra def process(self, msg, kwargs): kwargs["extra"] = self.extra return msg, kwargs def debug(self, msg, *args, **kwargs): msg, kwargs = self.process(msg, kwargs) self.logger.debug(msg, *args, **kwargs) def info(self, msg, *args, **kwargs): msg, kwargs 
= self.process(msg, kwargs) self.logger.info(msg, *args, **kwargs) def warning(self, msg, *args, **kwargs): msg, kwargs = self.process(msg, kwargs) self.logger.warning(msg, *args, **kwargs) def error(self, msg, *args, **kwargs): msg, kwargs = self.process(msg, kwargs) self.logger.error(msg, *args, **kwargs) def exception(self, msg, *args, **kwargs): msg, kwargs = self.process(msg, kwargs) kwargs["exc_info"] = 1 self.logger.error(msg, *args, **kwargs) def critical(self, msg, *args, **kwargs): msg, kwargs = self.process(msg, kwargs) self.logger.critical(msg, *args, **kwargs) def log(self, level, msg, *args, **kwargs): msg, kwargs = self.process(msg, kwargs) self.logger.log(level, msg, *args, **kwargs) # add missing aliases (that are in Logger class) LoggerAdapter.fatal = LoggerAdapter.critical LoggerAdapter.warn = LoggerAdapter.warning class SkyLogger(LoggerAdapter): def __init__(self, logger, extra): LoggerAdapter.__init__(self, logger, extra) self.name = logger.name def trace(self, msg, *args, **kwargs): """Log 'msg % args' with severity 'TRACE'.""" self.log(TRACE, msg, *args, **kwargs) def addHandler(self, hdlr): """Add the specified handler to this logger.""" self.logger.addHandler(hdlr) def isEnabledFor(self, level): """See if the underlying logger is enabled for the specified level.""" return self.logger.isEnabledFor(level) def getLogger(name=None, **kwargs_extra): """Get logger with extra functionality. Adds additional log levels, and extra fields to log record. name - name for logging.getLogger() kwargs_extra - extra fields to add to log record """ log = logging.getLogger(name) return SkyLogger(log, kwargs_extra) skytools-3.2.6/python/skytools/timeutil.py0000644000000000000000000001250212426435645015730 0ustar """Fill gaps in Python time API-s. parse_iso_timestamp: Parse reasonable subset of ISO_8601 timestamp formats. [ http://en.wikipedia.org/wiki/ISO_8601 ] datetime_to_timestamp: Get POSIX timestamp from datetime() object. 
""" import re import time from datetime import datetime, timedelta, tzinfo __all__ = ['parse_iso_timestamp', 'FixedOffsetTimezone', 'datetime_to_timestamp'] try: timedelta.total_seconds # new in 2.7 except AttributeError: def total_seconds(td): return float (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6 import ctypes _get_dict = ctypes.pythonapi._PyObject_GetDictPtr _get_dict.restype = ctypes.POINTER(ctypes.py_object) _get_dict.argtypes = [ctypes.py_object] d = _get_dict(timedelta)[0] d['total_seconds'] = total_seconds class FixedOffsetTimezone(tzinfo): """Fixed offset in minutes east from UTC.""" __slots__ = ('__offset', '__name') def __init__(self, offset): self.__offset = timedelta(minutes = offset) # numeric tz name h, m = divmod(abs(offset), 60) if offset < 0: h = -h if m: self.__name = "%+03d:%02d" % (h,m) else: self.__name = "%+03d" % h def utcoffset(self, dt): return self.__offset def tzname(self, dt): return self.__name def dst(self, dt): return ZERO ZERO = timedelta(0) # # Parse ISO_8601 timestamps. # """ TODO: - support more combinations from ISO 8601 (only reasonable ones) - cache TZ objects - make it faster? """ _iso_regex = r""" \s* (?P \d\d\d\d) [-] (?P \d\d) [-] (?P \d\d) [ T] (?P \d\d) [:] (?P \d\d) (?: [:] (?P \d\d ) (?: [.,] (?P \d+))? )? (?: \s* (?P [-+]) (?P \d\d) (?: [:]? (?P \d\d))? | (?P Z ) )? \s* $ """ _iso_rc = None def parse_iso_timestamp(s, default_tz = None): """Parse ISO timestamp to datetime object. YYYY-MM-DD[ T]HH:MM[:SS[.ss]][-+HH[:MM]] Assumes that second fractions are zero-trimmed from the end, so '.15' means 150000 microseconds. If the timezone offset is not present, use default_tz as tzinfo. By default its None, meaning the datetime object will be without tz. Only fixed offset timezones are supported. 
>>> str(parse_iso_timestamp('2005-06-01 15:00')) '2005-06-01 15:00:00' >>> str(parse_iso_timestamp(' 2005-06-01T15:00 +02 ')) '2005-06-01 15:00:00+02:00' >>> str(parse_iso_timestamp('2005-06-01 15:00:33+02:00')) '2005-06-01 15:00:33+02:00' >>> d = parse_iso_timestamp('2005-06-01 15:00:59.33 +02') >>> d.strftime("%z %Z") '+0200 +02' >>> str(parse_iso_timestamp(str(d))) '2005-06-01 15:00:59.330000+02:00' >>> parse_iso_timestamp('2005-06-01 15:00-0530').strftime('%Y-%m-%d %H:%M %z %Z') '2005-06-01 15:00 -0530 -05:30' >>> parse_iso_timestamp('2014-10-27T11:59:13Z').strftime('%Y-%m-%d %H:%M:%S %z %Z') '2014-10-27 11:59:13 +0000 +00' """ global _iso_rc if _iso_rc is None: _iso_rc = re.compile(_iso_regex, re.X) m = _iso_rc.match(s) if not m: raise ValueError('Date not in ISO format: %s' % repr(s)) tz = default_tz if m.group('tzsign'): tzofs = int(m.group('tzhr')) * 60 if m.group('tzmin'): tzofs += int(m.group('tzmin')) if m.group('tzsign') == '-': tzofs = -tzofs tz = FixedOffsetTimezone(tzofs) elif m.group('tzname'): tz = UTC return datetime(int(m.group('year')), int(m.group('month')), int(m.group('day')), int(m.group('hour')), int(m.group('min')), m.group('sec') and int(m.group('sec')) or 0, m.group('ss') and int(m.group('ss').ljust(6, '0')) or 0, tz) # # POSIX timestamp from datetime() # UTC = FixedOffsetTimezone(0) TZ_EPOCH = datetime.fromtimestamp(0, UTC) UTC_NOTZ_EPOCH = datetime.utcfromtimestamp(0) def datetime_to_timestamp(dt, local_time=True): """Get posix timestamp from datetime() object. if dt is without timezone, then local_time specifies whether it's UTC or local time. Returns seconds since epoch as float. 
>>> datetime_to_timestamp(parse_iso_timestamp("2005-06-01 15:00:59.5 +02")) 1117630859.5 >>> datetime_to_timestamp(datetime.fromtimestamp(1117630859.5, UTC)) 1117630859.5 >>> datetime_to_timestamp(datetime.fromtimestamp(1117630859.5)) 1117630859.5 >>> now = datetime.utcnow() >>> now2 = datetime.utcfromtimestamp(datetime_to_timestamp(now, False)) >>> abs(now2.microsecond - now.microsecond) < 100 True >>> now2 = now2.replace(microsecond = now.microsecond) >>> now == now2 True >>> now = datetime.now() >>> now2 = datetime.fromtimestamp(datetime_to_timestamp(now)) >>> abs(now2.microsecond - now.microsecond) < 100 True >>> now2 = now2.replace(microsecond = now.microsecond) >>> now == now2 True """ if dt.tzinfo: delta = dt - TZ_EPOCH return delta.total_seconds() elif local_time: s = time.mktime(dt.timetuple()) return s + (dt.microsecond / 1000000.0) else: delta = dt - UTC_NOTZ_EPOCH return delta.total_seconds() if __name__ == '__main__': import doctest doctest.testmod() skytools-3.2.6/python/skytools/quoting.py0000644000000000000000000001436512426435645015573 0ustar # quoting.py """Various helpers for string quoting/unquoting.""" import re __all__ = [ # _pyqoting / _cquoting "quote_literal", "quote_copy", "quote_bytea_raw", "db_urlencode", "db_urldecode", "unescape", "unquote_literal", # local "quote_bytea_literal", "quote_bytea_copy", "quote_statement", "quote_ident", "quote_fqident", "quote_json", "unescape_copy", "unquote_ident", "unquote_fqident", "json_encode", "json_decode", "make_pgarray", ] try: from skytools._cquoting import * except ImportError: from skytools._pyquoting import * # # SQL quoting # def quote_bytea_literal(s): """Quote bytea for regular SQL.""" return quote_literal(quote_bytea_raw(s)) def quote_bytea_copy(s): """Quote bytea for COPY.""" return quote_copy(quote_bytea_raw(s)) def quote_statement(sql, dict_or_list): """Quote whole statement. Data values are taken from dict or list or tuple. 
""" if hasattr(dict_or_list, 'items'): qdict = {} for k, v in dict_or_list.items(): qdict[k] = quote_literal(v) return sql % qdict else: qvals = [quote_literal(v) for v in dict_or_list] return sql % tuple(qvals) # reserved keywords (RESERVED_KEYWORD + TYPE_FUNC_NAME_KEYWORD) _ident_kwmap = { "all":1, "analyse":1, "analyze":1, "and":1, "any":1, "array":1, "as":1, "asc":1, "asymmetric":1, "authorization":1, "binary":1, "both":1, "case":1, "cast":1, "check":1, "collate":1, "collation":1, "column":1, "concurrently":1, "constraint":1, "create":1, "cross":1, "current_catalog":1, "current_date":1, "current_role":1, "current_schema":1, "current_time":1, "current_timestamp":1, "current_user":1, "default":1, "deferrable":1, "desc":1, "distinct":1, "do":1, "else":1, "end":1, "errors":1, "except":1, "false":1, "fetch":1, "for":1, "foreign":1, "freeze":1, "from":1, "full":1, "grant":1, "group":1, "having":1, "ilike":1, "in":1, "initially":1, "inner":1, "intersect":1, "into":1, "is":1, "isnull":1, "join":1, "lateral":1, "leading":1, "left":1, "like":1, "limit":1, "localtime":1, "localtimestamp":1, "natural":1, "new":1, "not":1, "notnull":1, "null":1, "off":1, "offset":1, "old":1, "on":1, "only":1, "or":1, "order":1, "outer":1, "over":1, "overlaps":1, "placing":1, "primary":1, "references":1, "returning":1, "right":1, "select":1, "session_user":1, "similar":1, "some":1, "symmetric":1, "table":1, "then":1, "to":1, "trailing":1, "true":1, "union":1, "unique":1, "user":1, "using":1, "variadic":1, "verbose":1, "when":1, "where":1, "window":1, "with":1, } _ident_bad = re.compile(r"[^a-z0-9_]|^[0-9]") def quote_ident(s): """Quote SQL identifier. If is checked against weird symbols and keywords. """ if _ident_bad.search(s) or s in _ident_kwmap: s = '"%s"' % s.replace('"', '""') elif not s: return '""' return s def quote_fqident(s): """Quote fully qualified SQL identifier. The '.' 
is taken as namespace separator and all parts are quoted separately Example: >>> quote_fqident('tbl') 'public.tbl' >>> quote_fqident('Baz.Foo.Bar') '"Baz"."Foo.Bar"' """ tmp = s.split('.', 1) if len(tmp) == 1: return 'public.' + quote_ident(s) return '.'.join(map(quote_ident, tmp)) # # quoting for JSON strings # _jsre = re.compile(r'[\x00-\x1F\\/"]') _jsmap = { "\b": "\\b", "\f": "\\f", "\n": "\\n", "\r": "\\r", "\t": "\\t", "\\": "\\\\", '"': '\\"', "/": "\\/", # to avoid html attacks } def _json_quote_char(m): """Quote single char.""" c = m.group(0) try: return _jsmap[c] except KeyError: return r"\u%04x" % ord(c) def quote_json(s): """JSON style quoting.""" if s is None: return "null" return '"%s"' % _jsre.sub(_json_quote_char, s) def unescape_copy(val): r"""Removes C-style escapes, also converts "\N" to None. Example: >>> unescape_copy(r'baz\tfo\'o') "baz\tfo'o" >>> unescape_copy(r'\N') is None True """ if val == r"\N": return None return unescape(val) def unquote_ident(val): """Unquotes possibly quoted SQL identifier. >>> unquote_ident('Foo') 'foo' >>> unquote_ident('"Wei "" rd"') 'Wei " rd' """ if len(val) > 1 and val[0] == '"' and val[-1] == '"': return val[1:-1].replace('""', '"') if val.find('"') > 0: raise Exception('unsupported syntax') return val.lower() def unquote_fqident(val): """Unquotes fully-qualified possibly quoted SQL identifier. >>> unquote_fqident('foo') 'foo' >>> unquote_fqident('"Foo"."Bar "" z"') 'Foo.Bar " z' """ tmp = val.split('.', 1) return '.'.join([unquote_ident(i) for i in tmp]) # accept simplejson or py2.6+ json module # search for simplejson first as there exists # incompat 'json' module try: import simplejson as json except ImportError: try: import json except: pass def json_encode(val = None, **kwargs): """Creates JSON string from Python object. 
>>> json_encode({'a': 1}) '{"a": 1}' >>> json_encode('a') '"a"' >>> json_encode(['a']) '["a"]' >>> json_encode(a=1) '{"a": 1}' """ return json.dumps(val or kwargs) def json_decode(s): """Parses JSON string into Python object. >>> json_decode('[1]') [1] """ return json.loads(s) # # Create Postgres array # # any chars not in "good" set? main bad ones: [ ,{}\"] _pgarray_bad_rx = r"[^0-9a-z_.%&=()<>*/+-]" _pgarray_bad_rc = None def _quote_pgarray_elem(s): if s is None: return 'NULL' s = str(s) if _pgarray_bad_rc.search(s): s = s.replace('\\', '\\\\') return '"' + s.replace('"', r'\"') + '"' elif not s: return '""' return s def make_pgarray(lst): r"""Formats Python list as Postgres array. Reverse of parse_pgarray(). >>> make_pgarray([]) '{}' >>> make_pgarray(['foo_3',1,'',None]) '{foo_3,1,"",NULL}' >>> make_pgarray([None,',','\\',"'",'"',"{","}",'_']) '{NULL,",","\\\\","\'","\\"","{","}",_}' """ global _pgarray_bad_rc if _pgarray_bad_rc is None: _pgarray_bad_rc = re.compile(_pgarray_bad_rx) items = [_quote_pgarray_elem(v) for v in lst] return '{' + ','.join(items) + '}' if __name__ == '__main__': import doctest doctest.testmod() skytools-3.2.6/python/skytools/adminscript.py0000644000000000000000000000761012426435645016415 0ustar #! /usr/bin/env python """Admin scripting. """ import sys, inspect import skytools __all__ = ['AdminScript'] class AdminScript(skytools.DBScript): """Contains common admin script tools. Second argument (first is .ini file) is taken as command name. If class method 'cmd_' + arg exists, it is called, otherwise error is given. 
""" commands_without_pidfile = {} def __init__(self, service_name, args): """AdminScript init.""" skytools.DBScript.__init__(self, service_name, args) if len(self.args) < 2: self.log.error("need command") sys.exit(1) cmd = self.args[1] if cmd in self.commands_without_pidfile: self.pidfile = None if self.pidfile: self.pidfile = self.pidfile + ".admin" def work(self): """Non-looping work function, calls command function.""" self.set_single_loop(1) cmd = self.args[1] cmdargs = self.args[2:] # find function fname = "cmd_" + cmd.replace('-', '_') if not hasattr(self, fname): self.log.error('bad subcommand, see --help for usage') sys.exit(1) fn = getattr(self, fname) # check if correct number of arguments (args, varargs, varkw, defaults) = inspect.getargspec(fn) n_args = len(args) - 1 # drop 'self' if varargs is None and n_args != len(cmdargs): helpstr = "" if n_args: helpstr = ": " + " ".join(args[1:]) self.log.error("command '%s' got %d args, but expects %d%s" % (cmd, len(cmdargs), n_args, helpstr)) sys.exit(1) # run command fn(*cmdargs) def fetch_list(self, db, sql, args, keycol = None): """Fetch a resultset from db, optionally turning it into value list.""" curs = db.cursor() curs.execute(sql, args) rows = curs.fetchall() db.commit() if not keycol: res = rows else: res = [r[keycol] for r in rows] return res def display_table(self, db, desc, sql, args = [], fields = [], fieldfmt = {}): """Display multirow query as a table.""" self.log.debug("display_table: %s" % skytools.quote_statement(sql, args)) curs = db.cursor() curs.execute(sql, args) rows = curs.fetchall() db.commit() if len(rows) == 0: return 0 if not fields: fields = [f[0] for f in curs.description] widths = [15] * len(fields) for row in rows: for i, k in enumerate(fields): rlen = row[k] and len(str(row[k])) or 0 widths[i] = widths[i] > rlen and widths[i] or rlen widths = [w + 2 for w in widths] fmt = '%%-%ds' * (len(widths) - 1) + '%%s' fmt = fmt % tuple(widths[:-1]) if desc: print(desc) print(fmt % 
tuple(fields)) print(fmt % tuple([ '-' * (w - 2) for w in widths ])) #print(fmt % tuple(['-'*15] * len(fields))) for row in rows: vals = [] for field in fields: val = row[field] if field in fieldfmt: val = fieldfmt[field](val) vals.append(val) print(fmt % tuple(vals)) print('\n') return 1 def exec_stmt(self, db, sql, args): """Run regular non-query SQL on db.""" self.log.debug("exec_stmt: %s" % skytools.quote_statement(sql, args)) curs = db.cursor() curs.execute(sql, args) db.commit() def exec_query(self, db, sql, args): """Run regular query SQL on db.""" self.log.debug("exec_query: %s" % skytools.quote_statement(sql, args)) curs = db.cursor() curs.execute(sql, args) res = curs.fetchall() db.commit() return res skytools-3.2.6/python/skytools/hashtext.py0000644000000000000000000001023612426435645015726 0ustar """ Implementation of Postgres hashing function. hashtext_old() - used up to PostgreSQL 8.3 hashtext_new() - used since PostgreSQL 8.4 >>> import skytools._chashtext >>> for i in range(3): ... print [hashtext_new_py('x' * (i*5 + j)) for j in range(5)] [-1477818771, 1074944137, -1086392228, -1992236649, -1379736791] [-370454118, 1489915569, -66683019, -2126973000, 1651296771] [755764456, -1494243903, 631527812, 28686851, -9498641] >>> for i in range(3): ... 
print [hashtext_old_py('x' * (i*5 + j)) for j in range(5)] [-863449762, 37835117, 294739542, -320432768, 1007638138] [1422906842, -261065348, 59863994, -162804943, 1736144510] [-682756517, 317827663, -495599455, -1411793989, 1739997714] >>> data = 'HypficUjFitraxlumCitcemkiOkIkthi' >>> p = [hashtext_old_py(data[:l]) for l in range(len(data)+1)] >>> c = [hashtext_old(data[:l]) for l in range(len(data)+1)] >>> assert p == c, '%s <> %s' % (p, c) >>> p == c True >>> p = [hashtext_new_py(data[:l]) for l in range(len(data)+1)] >>> c = [hashtext_new(data[:l]) for l in range(len(data)+1)] >>> assert p == c, '%s <> %s' % (p, c) >>> p == c True """ import sys, struct __all__ = ["hashtext_old", "hashtext_new"] # pad for last partial block PADDING = '\0' * 12 def uint32(x): """python does not have 32 bit integer so we need this hack to produce uint32 after bit operations""" return x & 0xffffffff # # Old Postgres hashtext() - lookup2 with custom initval # FMT_OLD = struct.Struct(">13)) b -= c; b -= a; b = uint32(b ^ (a<<8)) c -= a; c -= b; c = uint32(c ^ (b>>13)) a -= b; a -= c; a = uint32(a ^ (c>>12)) b -= c; b -= a; b = uint32(b ^ (a<<16)) c -= a; c -= b; c = uint32(c ^ (b>>5)) a -= b; a -= c; a = uint32(a ^ (c>>3)) b -= c; b -= a; b = uint32(b ^ (a<<10)) c -= a; c -= b; c = uint32(c ^ (b>>15)) return a, b, c def hashtext_old_py(k): """Old Postgres hashtext()""" remain = len(k) pos = 0 a = b = 0x9e3779b9 c = 3923095 # handle most of the key while remain >= 12: a2, b2, c2 = FMT_OLD.unpack_from(k, pos) a, b, c = mix_old(a + a2, b + b2, c + c2) pos += 12; remain -= 12; # handle the last 11 bytes a2, b2, c2 = FMT_OLD.unpack_from(k[pos:] + PADDING, 0) # the lowest byte of c is reserved for the length c2 = (c2 << 8) + len(k) a, b, c = mix_old(a + a2, b + b2, c + c2) # convert to signed int if (c & 0x80000000): c = -0x100000000 + c return int(c) # # New Postgres hashtext() - hacked lookup3: # - custom initval # - calls mix() when len=12 # - shifted c in last block on little-endian # 
FMT_NEW = struct.Struct("=LLL") def rol32(x,k): return (((x)<<(k)) | (uint32(x)>>(32-(k)))) def mix_new(a,b,c): a -= c; a ^= rol32(c, 4); c += b b -= a; b ^= rol32(a, 6); a += c c -= b; c ^= rol32(b, 8); b += a a -= c; a ^= rol32(c,16); c += b b -= a; b ^= rol32(a,19); a += c c -= b; c ^= rol32(b, 4); b += a return uint32(a), uint32(b), uint32(c) def final_new(a,b,c): c ^= b; c -= rol32(b,14) a ^= c; a -= rol32(c,11) b ^= a; b -= rol32(a,25) c ^= b; c -= rol32(b,16) a ^= c; a -= rol32(c, 4) b ^= a; b -= rol32(a,14) c ^= b; c -= rol32(b,24) return uint32(a), uint32(b), uint32(c) def hashtext_new_py(k): """New Postgres hashtext()""" remain = len(k) pos = 0 a = b = c = 0x9e3779b9 + len(k) + 3923095 # handle most of the key while remain >= 12: a2, b2, c2 = FMT_NEW.unpack_from(k, pos) a, b, c = mix_new(a + a2, b + b2, c + c2) pos += 12; remain -= 12; # handle the last 11 bytes a2, b2, c2 = FMT_NEW.unpack_from(k[pos:] + PADDING, 0) if sys.byteorder == 'little': c2 = c2 << 8 a, b, c = final_new(a + a2, b + b2, c + c2) # convert to signed int if (c & 0x80000000): c = -0x100000000 + c return int(c) try: from skytools._chashtext import hashtext_old, hashtext_new except ImportError: hashtext_old = hashtext_old_py hashtext_new = hashtext_new_py # run doctest if __name__ == '__main__': import doctest doctest.testmod() skytools-3.2.6/python/skytools/scripting.py0000644000000000000000000010631412426435645016103 0ustar """Useful functions and classes for database scripts. 
""" import errno import logging import logging.config import logging.handlers import optparse import os import select import signal import sys import time import skytools import skytools.skylog try: import skytools.installer_config default_skylog = skytools.installer_config.skylog except ImportError: default_skylog = 0 __pychecker__ = 'no-badexcept' __all__ = ['BaseScript', 'UsageError', 'daemonize', 'DBScript'] class UsageError(Exception): """User induced error.""" # # daemon mode # def daemonize(): """Turn the process into daemon. Goes background and disables all i/o. """ # launch new process, kill parent pid = os.fork() if pid != 0: os._exit(0) # start new session os.setsid() # stop i/o fd = os.open("/dev/null", os.O_RDWR) os.dup2(fd, 0) os.dup2(fd, 1) os.dup2(fd, 2) if fd > 2: os.close(fd) # # Pidfile locking+cleanup & daemonization combined # def run_single_process(runnable, daemon, pidfile): """Run runnable class, possibly daemonized, locked on pidfile.""" # check if another process is running if pidfile and os.path.isfile(pidfile): if skytools.signal_pidfile(pidfile, 0): print("Pidfile exists, another process running?") sys.exit(1) else: print("Ignoring stale pidfile") # daemonize if needed if daemon: daemonize() # clean only own pidfile own_pidfile = False try: if pidfile: data = str(os.getpid()) skytools.write_atomic(pidfile, data) own_pidfile = True runnable.run() finally: if own_pidfile: try: os.remove(pidfile) except: pass # # logging setup # _log_config_done = 0 _log_init_done = {} def _load_log_config(fn, defs): """Fixed fileConfig.""" # Work around fileConfig default behaviour to disable # not only old handlers on load (which slightly makes sense) # but also old logger objects (which does not make sense). 
if sys.hexversion >= 0x2060000: logging.config.fileConfig(fn, defs, False) else: logging.config.fileConfig(fn, defs) root = logging.getLogger() for lg in root.manager.loggerDict.values(): lg.disabled = 0 def _init_log(job_name, service_name, cf, log_level, is_daemon): """Logging setup happens here.""" global _log_init_done, _log_config_done got_skylog = 0 use_skylog = cf.getint("use_skylog", default_skylog) # if non-daemon, avoid skylog if script is running on console. # set use_skylog=2 to disable. if not is_daemon and use_skylog == 1: if os.isatty(sys.stdout.fileno()): use_skylog = 0 # load logging config if needed if use_skylog and not _log_config_done: # python logging.config braindamage: # cannot specify external classess without such hack logging.skylog = skytools.skylog skytools.skylog.set_service_name(service_name, job_name) # load general config flist = cf.getlist('skylog_locations', ['skylog.ini', '~/.skylog.ini', '/etc/skylog.ini']) for fn in flist: fn = os.path.expanduser(fn) if os.path.isfile(fn): defs = {'job_name': job_name, 'service_name': service_name} _load_log_config(fn, defs) got_skylog = 1 break _log_config_done = 1 if not got_skylog: sys.stderr.write("skylog.ini not found!\n") sys.exit(1) # avoid duplicate logging init for job_name log = logging.getLogger(job_name) if job_name in _log_init_done: return log _log_init_done[job_name] = 1 # tune level on root logger root = logging.getLogger() root.setLevel(log_level) # compatibility: specify ini file in script config def_fmt = '%(asctime)s %(process)s %(levelname)s %(message)s' def_datefmt = '' # None logfile = cf.getfile("logfile", "") if logfile: fstr = cf.get('logfmt_file', def_fmt) fstr_date = cf.get('logdatefmt_file', def_datefmt) if log_level < logging.INFO: fstr = cf.get('logfmt_file_verbose', fstr) fstr_date = cf.get('logdatefmt_file_verbose', fstr_date) fmt = logging.Formatter(fstr, fstr_date) size = cf.getint('log_size', 10*1024*1024) num = cf.getint('log_count', 3) hdlr = 
logging.handlers.RotatingFileHandler( logfile, 'a', size, num) hdlr.setFormatter(fmt) root.addHandler(hdlr) # if skylog.ini is disabled or not available, log at least to stderr if not got_skylog: fstr = cf.get('logfmt_console', def_fmt) fstr_date = cf.get('logdatefmt_console', def_datefmt) if log_level < logging.INFO: fstr = cf.get('logfmt_console_verbose', fstr) fstr_date = cf.get('logdatefmt_console_verbose', fstr_date) hdlr = logging.StreamHandler() fmt = logging.Formatter(fstr, fstr_date) hdlr.setFormatter(fmt) root.addHandler(hdlr) return log class BaseScript(object): """Base class for service scripts. Handles logging, daemonizing, config, errors. Config template:: ## Parameters for skytools.BaseScript ## # how many seconds to sleep between work loops # if missing or 0, then instead sleeping, the script will exit loop_delay = 1.0 # where to log logfile = ~/log/%(job_name)s.log # where to write pidfile pidfile = ~/pid/%(job_name)s.pid # per-process name to use in logging #job_name = %(config_name)s # whether centralized logging should be used # search-path [ ./skylog.ini, ~/.skylog.ini, /etc/skylog.ini ] # 0 - disabled # 1 - enabled, unless non-daemon on console (os.isatty()) # 2 - always enabled #use_skylog = 0 # where to find skylog.ini #skylog_locations = skylog.ini, ~/.skylog.ini, /etc/skylog.ini # how many seconds to sleep after catching a exception #exception_sleep = 20 """ service_name = None job_name = None cf = None cf_defaults = {} pidfile = None # >0 - sleep time if work() requests sleep # 0 - exit if work requests sleep # <0 - run work() once [same as looping=0] loop_delay = 1.0 # 0 - run work() once # 1 - run work() repeatedly looping = 1 # result from last work() call: # 1 - there is probably more work, don't sleep # 0 - no work, sleep before calling again # -1 - exception was thrown work_state = 1 # setup logger here, this allows override by subclass log = logging.getLogger('skytools.BaseScript') def __init__(self, service_name, args): """Script 
setup. User class should override work() and optionally __init__(), startup(), reload(), reset(), shutdown() and init_optparse(). NB: In case of daemon, __init__() and startup()/work()/shutdown() will be run in different processes. So nothing fancy should be done in __init__(). @param service_name: unique name for script. It will be also default job_name, if not specified in config. @param args: cmdline args (sys.argv[1:]), but can be overridden """ self.service_name = service_name self.go_daemon = 0 self.need_reload = 0 self.stat_dict = {} self.log_level = logging.INFO # parse command line parser = self.init_optparse() self.options, self.args = parser.parse_args(args) # check args if self.options.version: self.print_version() sys.exit(0) if self.options.daemon: self.go_daemon = 1 if self.options.quiet: self.log_level = logging.WARNING if self.options.verbose > 1: self.log_level = skytools.skylog.TRACE elif self.options.verbose: self.log_level = logging.DEBUG self.cf_override = {} if self.options.set: for a in self.options.set: k, v = a.split('=', 1) self.cf_override[k.strip()] = v.strip() if self.options.ini: self.print_ini() sys.exit(0) # read config file self.reload() # init logging _init_log(self.job_name, self.service_name, self.cf, self.log_level, self.go_daemon) # send signal, if needed if self.options.cmd == "kill": self.send_signal(signal.SIGTERM) elif self.options.cmd == "stop": self.send_signal(signal.SIGINT) elif self.options.cmd == "reload": self.send_signal(signal.SIGHUP) def print_version(self): service = self.service_name if getattr(self, '__version__', None): service += ' version %s' % self.__version__ print '%s, Skytools version %s' % (service, skytools.__version__) def print_ini(self): """Prints out ini file from doc string of the script of default for dbscript Used by --ini option on command line. 
""" # current service name print("[%s]\n" % self.service_name) # walk class hierarchy bases = [self.__class__] while len(bases) > 0: parents = [] for c in bases: for p in c.__bases__: if p not in parents: parents.append(p) doc = c.__doc__ if doc: self._print_ini_frag(doc) bases = parents def _print_ini_frag(self, doc): # use last '::' block as config template pos = doc and doc.rfind('::\n') or -1 if pos < 0: return doc = doc[pos+2 : ].rstrip() doc = skytools.dedent(doc) # merge overrided options into output for ln in doc.splitlines(): vals = ln.split('=', 1) if len(vals) != 2: print(ln) continue k = vals[0].strip() v = vals[1].strip() if k and k[0] == '#': print(ln) k = k[1:] if k in self.cf_override: print('%s = %s' % (k, self.cf_override[k])) elif k in self.cf_override: if v: print('#' + ln) print('%s = %s' % (k, self.cf_override[k])) else: print(ln) print('') def load_config(self): """Loads and returns skytools.Config instance. By default it uses first command-line argument as config file name. Can be overridden. """ if len(self.args) < 1: print("need config file, use --help for help.") sys.exit(1) conf_file = self.args[0] return skytools.Config(self.service_name, conf_file, user_defs = self.cf_defaults, override = self.cf_override) def init_optparse(self, parser = None): """Initialize a OptionParser() instance that will be used to parse command line arguments. Note that it can be overridden both directions - either DBScript will initialize an instance and pass it to user code or user can initialize and then pass to DBScript.init_optparse(). @param parser: optional OptionParser() instance, where DBScript should attach its own arguments. @return: initialized OptionParser() instance. 
""" if parser: p = parser else: p = optparse.OptionParser() p.set_usage("%prog [options] INI") # generic options p.add_option("-q", "--quiet", action="store_true", help = "log only errors and warnings") p.add_option("-v", "--verbose", action="count", help = "log verbosely") p.add_option("-d", "--daemon", action="store_true", help = "go background") p.add_option("-V", "--version", action="store_true", help = "print version info and exit") p.add_option("", "--ini", action="store_true", help = "display sample ini file") p.add_option("", "--set", action="append", help = "override config setting (--set 'PARAM=VAL')") # control options g = optparse.OptionGroup(p, 'control running process') g.add_option("-r", "--reload", action="store_const", const="reload", dest="cmd", help = "reload config (send SIGHUP)") g.add_option("-s", "--stop", action="store_const", const="stop", dest="cmd", help = "stop program safely (send SIGINT)") g.add_option("-k", "--kill", action="store_const", const="kill", dest="cmd", help = "kill program immediately (send SIGTERM)") p.add_option_group(g) return p def send_signal(self, sig): if not self.pidfile: self.log.warning("No pidfile in config, nothing to do") elif os.path.isfile(self.pidfile): alive = skytools.signal_pidfile(self.pidfile, sig) if not alive: self.log.warning("pidfile exists, but process not running") else: self.log.warning("No pidfile, process not running") sys.exit(0) def set_single_loop(self, do_single_loop): """Changes whether the script will loop or not.""" if do_single_loop: self.looping = 0 else: self.looping = 1 def _boot_daemon(self): run_single_process(self, self.go_daemon, self.pidfile) def start(self): """This will launch main processing thread.""" if self.go_daemon: if not self.pidfile: self.log.error("Daemon needs pidfile") sys.exit(1) self.run_func_safely(self._boot_daemon) def stop(self): """Safely stops processing loop.""" self.looping = 0 def reload(self): "Reload config." 
# avoid double loading on startup if not self.cf: self.cf = self.load_config() else: self.cf.reload() self.log.info ("Config reloaded") self.job_name = self.cf.get("job_name") self.pidfile = self.cf.getfile("pidfile", '') self.loop_delay = self.cf.getfloat("loop_delay", self.loop_delay) self.exception_sleep = self.cf.getfloat("exception_sleep", 20) self.exception_quiet = self.cf.getlist("exception_quiet", []) self.exception_grace = self.cf.getfloat("exception_grace", 5*60) self.exception_reset = self.cf.getfloat("exception_reset", 15*60) def hook_sighup(self, sig, frame): "Internal SIGHUP handler. Minimal code here." self.need_reload = 1 last_sigint = 0 def hook_sigint(self, sig, frame): "Internal SIGINT handler. Minimal code here." self.stop() t = time.time() if t - self.last_sigint < 1: self.log.warning("Double ^C, fast exit") sys.exit(1) self.last_sigint = t def stat_get(self, key): """Reads a stat value.""" try: value = self.stat_dict[key] except KeyError: value = None return value def stat_put(self, key, value): """Sets a stat value.""" self.stat_dict[key] = value def stat_increase(self, key, increase = 1): """Increases a stat value.""" try: self.stat_dict[key] += increase except KeyError: self.stat_dict[key] = increase def send_stats(self): "Send statistics to log." res = [] for k, v in self.stat_dict.items(): res.append("%s: %s" % (k, v)) if len(res) == 0: return logmsg = "{%s}" % ", ".join(res) self.log.info(logmsg) self.stat_dict = {} def reset(self): "Something bad happened, reset all state." pass def run(self): "Thread main loop." # run startup, safely self.run_func_safely(self.startup) while 1: # reload config, if needed if self.need_reload: self.reload() self.need_reload = 0 # do some work work = self.run_once() if not self.looping or self.loop_delay < 0: break # remember work state self.work_state = work # should sleep? if not work: if self.loop_delay > 0: self.sleep(self.loop_delay) if not self.looping: break else: break # run shutdown, safely? 
self.shutdown() def run_once(self): state = self.run_func_safely(self.work, True) # send stats that was added self.send_stats() return state last_func_fail = None def run_func_safely(self, func, prefer_looping = False): "Run users work function, safely." try: r = func() if self.last_func_fail and time.time() > self.last_func_fail + self.exception_reset: self.last_func_fail = None return r except UsageError, d: self.log.error(str(d)) sys.exit(1) except MemoryError, d: try: # complex logging may not succeed self.log.exception("Job %s out of memory, exiting" % self.job_name) except MemoryError: self.log.fatal("Out of memory") sys.exit(1) except SystemExit, d: self.send_stats() if prefer_looping and self.looping and self.loop_delay > 0: self.log.info("got SystemExit(%s), exiting" % str(d)) self.reset() raise d except KeyboardInterrupt, d: self.send_stats() if prefer_looping and self.looping and self.loop_delay > 0: self.log.info("got KeyboardInterrupt, exiting") self.reset() sys.exit(1) except Exception, d: try: # this may fail too self.send_stats() except: pass if self.last_func_fail is None: self.last_func_fail = time.time() emsg = str(d).rstrip() self.reset() self.exception_hook(d, emsg) # reset and sleep self.reset() if prefer_looping and self.looping and self.loop_delay > 0: self.sleep(self.exception_sleep) return -1 sys.exit(1) def sleep(self, secs): """Make script sleep for some amount of time.""" try: time.sleep(secs) except IOError, ex: if ex.errno != errno.EINTR: raise def _is_quiet_exception(self, ex): return ((self.exception_quiet == ["ALL"] or ex.__class__.__name__ in self.exception_quiet) and self.last_func_fail and time.time() < self.last_func_fail + self.exception_grace) def exception_hook(self, det, emsg): """Called on after exception processing. Can do additional logging. 
@param det: exception details @param emsg: exception msg """ lm = "Job %s crashed: %s" % (self.job_name, emsg) if self._is_quiet_exception(det): self.log.warning(lm) else: self.log.exception(lm) def work(self): """Here should user's processing happen. Return value is taken as boolean - if true, the next loop starts immediately. If false, DBScript sleeps for a loop_delay. """ raise Exception("Nothing implemented?") def startup(self): """Will be called just before entering main loop. In case of daemon, if will be called in same process as work(), unlike __init__(). """ self.started = time.time() # set signals if hasattr(signal, 'SIGHUP'): signal.signal(signal.SIGHUP, self.hook_sighup) if hasattr(signal, 'SIGINT'): signal.signal(signal.SIGINT, self.hook_sigint) def shutdown(self): """Will be called just after exiting main loop. In case of daemon, if will be called in same process as work(), unlike __init__(). """ pass # define some aliases (short-cuts / backward compatibility cruft) stat_add = stat_put # Old, deprecated function. stat_inc = stat_increase ## ## DBScript ## #: how old connections need to be closed DEF_CONN_AGE = 20*60 # 20 min class DBScript(BaseScript): """Base class for database scripts. Handles database connection state. Config template:: ## Parameters for skytools.DBScript ## # default lifetime for database connections (in seconds) #connection_lifetime = 1200 """ def __init__(self, service_name, args): """Script setup. User class should override work() and optionally __init__(), startup(), reload(), reset() and init_optparse(). NB: in case of daemon, the __init__() and startup()/work() will be run in different processes. So nothing fancy should be done in __init__(). @param service_name: unique name for script. It will be also default job_name, if not specified in config. 
@param args: cmdline args (sys.argv[1:]), but can be overridden """ self.db_cache = {} self._db_defaults = {} self._listen_map = {} # dbname: channel_list BaseScript.__init__(self, service_name, args) def connection_hook(self, dbname, conn): pass def set_database_defaults(self, dbname, **kwargs): self._db_defaults[dbname] = kwargs def add_connect_string_profile(self, connstr, profile): """Add extra profile info to connect string. """ if profile: extra = self.cf.get("%s_extra_connstr" % profile, '') if extra: connstr += ' ' + extra return connstr def get_database(self, dbname, autocommit = 0, isolation_level = -1, cache = None, connstr = None, profile = None): """Load cached database connection. User must not store it permanently somewhere, as all connections will be invalidated on reset. """ max_age = self.cf.getint('connection_lifetime', DEF_CONN_AGE) if not cache: cache = dbname params = {} defs = self._db_defaults.get(cache, {}) params.update(defs) if isolation_level >= 0: params['isolation_level'] = isolation_level elif autocommit: params['isolation_level'] = 0 elif params.get('autocommit', 0): params['isolation_level'] = 0 elif not 'isolation_level' in params: params['isolation_level'] = skytools.I_READ_COMMITTED if not 'max_age' in params: params['max_age'] = max_age if cache in self.db_cache: dbc = self.db_cache[cache] if connstr is None: connstr = self.cf.get(dbname, '') if connstr: connstr = self.add_connect_string_profile(connstr, profile) dbc.check_connstr(connstr) else: if not connstr: connstr = self.cf.get(dbname) connstr = self.add_connect_string_profile(connstr, profile) # connstr might contain password, it is not a good idea to log it filtered_connstr = connstr pos = connstr.lower().find('password') if pos >= 0: filtered_connstr = connstr[:pos] + ' [...]' self.log.debug("Connect '%s' to '%s'" % (cache, filtered_connstr)) dbc = DBCachedConn(cache, connstr, params['max_age'], setup_func = self.connection_hook) self.db_cache[cache] = dbc clist = [] if 
cache in self._listen_map: clist = self._listen_map[cache] return dbc.get_connection(params['isolation_level'], clist) def close_database(self, dbname): """Explicitly close a cached connection. Next call to get_database() will reconnect. """ if dbname in self.db_cache: dbc = self.db_cache[dbname] dbc.reset() del self.db_cache[dbname] def reset(self): "Something bad happened, reset all connections." for dbc in self.db_cache.values(): dbc.reset() self.db_cache = {} BaseScript.reset(self) def run_once(self): state = BaseScript.run_once(self) # reconnect if needed for dbc in self.db_cache.values(): dbc.refresh() return state def exception_hook(self, d, emsg): """Log database and query details from exception.""" curs = getattr(d, 'cursor', None) conn = getattr(curs, 'connection', None) cname = getattr(conn, 'my_name', None) if cname: # Properly named connection cname = d.cursor.connection.my_name sql = getattr(curs, 'query', None) or '?' if len(sql) > 200: # avoid logging londiste huge batched queries sql = sql[:60] + " ..." lm = "Job %s got error on connection '%s': %s. 
Query: %s" % ( self.job_name, cname, emsg, sql) if self._is_quiet_exception(d): self.log.warning(lm) else: self.log.exception(lm) else: BaseScript.exception_hook(self, d, emsg) def sleep(self, secs): """Make script sleep for some amount of time.""" fdlist = [] for dbname in self._listen_map.keys(): if dbname not in self.db_cache: continue fd = self.db_cache[dbname].fileno() if fd is None: continue fdlist.append(fd) if not fdlist: return BaseScript.sleep(self, secs) try: if hasattr(select, 'poll'): p = select.poll() for fd in fdlist: p.register(fd, select.POLLIN) p.poll(int(secs * 1000)) else: select.select(fdlist, [], [], secs) except select.error, d: self.log.info('wait canceled') def _exec_cmd(self, curs, sql, args, quiet = False, prefix = None): """Internal tool: Run SQL on cursor.""" if self.options.verbose: self.log.debug("exec_cmd: %s" % skytools.quote_statement(sql, args)) _pfx = "" if prefix: _pfx = "[%s] " % prefix curs.execute(sql, args) ok = True rows = curs.fetchall() for row in rows: try: code = row['ret_code'] msg = row['ret_note'] except KeyError: self.log.error("Query does not conform to exec_cmd API:") self.log.error("SQL: %s" % skytools.quote_statement(sql, args)) self.log.error("Row: %s" % repr(row.copy())) sys.exit(1) level = code / 100 if level == 1: self.log.debug("%s%d %s" % (_pfx, code, msg)) elif level == 2: if quiet: self.log.debug("%s%d %s" % (_pfx, code, msg)) else: self.log.info("%s%s" % (_pfx, msg,)) elif level == 3: self.log.warning("%s%s" % (_pfx, msg,)) else: self.log.error("%s%s" % (_pfx, msg,)) self.log.debug("Query was: %s" % skytools.quote_statement(sql, args)) ok = False return (ok, rows) def _exec_cmd_many(self, curs, sql, baseargs, extra_list, quiet = False, prefix=None): """Internal tool: Run SQL on cursor multiple times.""" ok = True rows = [] for a in extra_list: (tmp_ok, tmp_rows) = self._exec_cmd(curs, sql, baseargs + [a], quiet, prefix) if not tmp_ok: ok = False rows += tmp_rows return (ok, rows) def exec_cmd(self, 
db_or_curs, q, args, commit = True, quiet = False, prefix = None): """Run SQL on db with code/value error handling.""" if hasattr(db_or_curs, 'cursor'): db = db_or_curs curs = db.cursor() else: db = None curs = db_or_curs (ok, rows) = self._exec_cmd(curs, q, args, quiet, prefix) if ok: if commit and db: db.commit() return rows else: if db: db.rollback() if self.options.verbose: raise Exception("db error") # error is already logged sys.exit(1) def exec_cmd_many(self, db_or_curs, sql, baseargs, extra_list, commit = True, quiet = False, prefix = None): """Run SQL on db multiple times.""" if hasattr(db_or_curs, 'cursor'): db = db_or_curs curs = db.cursor() else: db = None curs = db_or_curs (ok, rows) = self._exec_cmd_many(curs, sql, baseargs, extra_list, quiet, prefix) if ok: if commit and db: db.commit() return rows else: if db: db.rollback() if self.options.verbose: raise Exception("db error") # error is already logged sys.exit(1) def execute_with_retry (self, dbname, stmt, args, exceptions = None): """ Execute SQL and retry if it fails. Return number of retries and current valid cursor, or raise an exception. 
""" sql_retry = self.cf.getbool("sql_retry", False) sql_retry_max_count = self.cf.getint("sql_retry_max_count", 10) sql_retry_max_time = self.cf.getint("sql_retry_max_time", 300) sql_retry_formula_a = self.cf.getint("sql_retry_formula_a", 1) sql_retry_formula_b = self.cf.getint("sql_retry_formula_b", 5) sql_retry_formula_cap = self.cf.getint("sql_retry_formula_cap", 60) elist = exceptions or tuple() stime = time.time() tried = 0 dbc = None while True: try: if dbc is None: if dbname not in self.db_cache: self.get_database(dbname, autocommit=1) dbc = self.db_cache[dbname] if dbc.isolation_level != skytools.I_AUTOCOMMIT: raise skytools.UsageError ("execute_with_retry: autocommit required") else: dbc.reset() curs = dbc.get_connection(dbc.isolation_level).cursor() curs.execute (stmt, args) break except elist, e: if not sql_retry or tried >= sql_retry_max_count or time.time() - stime >= sql_retry_max_time: raise self.log.info("Job %s got error on connection %s: %s" % (self.job_name, dbname, e)) except: raise # y = a + bx , apply cap y = sql_retry_formula_a + sql_retry_formula_b * tried if sql_retry_formula_cap is not None and y > sql_retry_formula_cap: y = sql_retry_formula_cap tried += 1 self.log.info("Retry #%i in %i seconds ...", tried, y) self.sleep(y) return tried, curs def listen(self, dbname, channel): """Make connection listen for specific event channel. Listening will be activated on next .get_database() call. Basically this means that DBScript.sleep() will poll for events on that db connection, so when event appears, script will be woken up. """ if dbname not in self._listen_map: self._listen_map[dbname] = [] clist = self._listen_map[dbname] if channel not in clist: clist.append(channel) def unlisten(self, dbname, channel='*'): """Stop connection for listening on specific event channel. Listening will stop on next .get_database() call. 
""" if dbname not in self._listen_map: return if channel == '*': del self._listen_map[dbname] return clist = self._listen_map[dbname] try: clist.remove(channel) except ValueError: pass class DBCachedConn(object): """Cache a db connection.""" def __init__(self, name, loc, max_age = DEF_CONN_AGE, verbose = False, setup_func=None, channels=[]): self.name = name self.loc = loc self.conn = None self.conn_time = 0 self.max_age = max_age self.isolation_level = -1 self.verbose = verbose self.setup_func = setup_func self.listen_channel_list = [] def fileno(self): if not self.conn: return None return self.conn.cursor().fileno() def get_connection(self, isolation_level = -1, listen_channel_list = []): # default isolation_level is READ COMMITTED if isolation_level < 0: isolation_level = skytools.I_READ_COMMITTED # new conn? if not self.conn: self.isolation_level = isolation_level self.conn = skytools.connect_database(self.loc) self.conn.my_name = self.name self.conn.set_isolation_level(isolation_level) self.conn_time = time.time() if self.setup_func: self.setup_func(self.name, self.conn) else: if self.isolation_level != isolation_level: raise Exception("Conflict in isolation_level") self._sync_listen(listen_channel_list) # done return self.conn def _sync_listen(self, new_clist): if not new_clist and not self.listen_channel_list: return curs = self.conn.cursor() for ch in self.listen_channel_list: if ch not in new_clist: curs.execute("UNLISTEN %s" % skytools.quote_ident(ch)) for ch in new_clist: if ch not in self.listen_channel_list: curs.execute("LISTEN %s" % skytools.quote_ident(ch)) if self.isolation_level != skytools.I_AUTOCOMMIT: self.conn.commit() self.listen_channel_list = new_clist[:] def refresh(self): if not self.conn: return #for row in self.conn.notifies(): # if row[0].lower() == "reload": # self.reset() # return if not self.max_age: return if time.time() - self.conn_time >= self.max_age: self.reset() def reset(self): if not self.conn: return # drop reference conn = 
self.conn self.conn = None self.listen_channel_list = [] # close try: conn.close() except: pass def check_connstr(self, connstr): """Drop connection if connect string has changed. """ if self.loc != connstr: self.reset() skytools-3.2.6/python/skytools/checker.py0000755000000000000000000005023512426435645015510 0ustar #! /usr/bin/env python """Catch moment when tables are in sync on master and slave. """ import sys, time, os, subprocess import pkgloader pkgloader.require('skytools', '3.0') import skytools class TableRepair: """Checks that tables in two databases are in sync.""" def __init__(self, table_name, log): self.table_name = table_name self.fq_table_name = skytools.quote_fqident(table_name) self.log = log self.reset() def reset(self): self.cnt_insert = 0 self.cnt_update = 0 self.cnt_delete = 0 self.total_src = 0 self.total_dst = 0 self.pkey_list = [] self.common_fields = [] self.apply_fixes = False self.apply_cursor = None def do_repair(self, src_db, dst_db, where, pfx = 'repair', apply_fixes = False): """Actual comparision.""" self.reset() src_curs = src_db.cursor() dst_curs = dst_db.cursor() self.apply_fixes = apply_fixes if apply_fixes: self.apply_cursor = dst_curs self.log.info('Checking %s' % self.table_name) copy_tbl = self.gen_copy_tbl(src_curs, dst_curs, where) dump_src = "%s.%s.src" % (pfx, self.table_name) dump_dst = "%s.%s.dst" % (pfx, self.table_name) fix = "%s.%s.fix" % (pfx, self.table_name) self.log.info("Dumping src table: %s" % self.table_name) self.dump_table(copy_tbl, src_curs, dump_src) src_db.commit() self.log.info("Dumping dst table: %s" % self.table_name) self.dump_table(copy_tbl, dst_curs, dump_dst) dst_db.commit() self.log.info("Sorting src table: %s" % self.table_name) self.do_sort(dump_src, dump_src + '.sorted') self.log.info("Sorting dst table: %s" % self.table_name) self.do_sort(dump_dst, dump_dst + '.sorted') self.dump_compare(dump_src + ".sorted", dump_dst + ".sorted", fix) os.unlink(dump_src) os.unlink(dump_dst) 
os.unlink(dump_src + ".sorted") os.unlink(dump_dst + ".sorted") if apply_fixes: dst_db.commit() def do_sort(self, src, dst): p = subprocess.Popen(["sort", "--version"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) s_ver = p.communicate()[0] del p xenv = os.environ.copy() xenv['LANG'] = 'C' xenv['LC_ALL'] = 'C' cmdline = ['sort', '-T', '.'] if s_ver.find("coreutils") > 0: cmdline.append('-S') cmdline.append('30%') cmdline.append('-o') cmdline.append(dst) cmdline.append(src) p = subprocess.Popen(cmdline, env = xenv) if p.wait() != 0: raise Exception('sort failed') def gen_copy_tbl(self, src_curs, dst_curs, where): """Create COPY expession from common fields.""" self.pkey_list = skytools.get_table_pkeys(src_curs, self.table_name) dst_pkey = skytools.get_table_pkeys(dst_curs, self.table_name) if dst_pkey != self.pkey_list: self.log.error('pkeys do not match') sys.exit(1) src_cols = skytools.get_table_columns(src_curs, self.table_name) dst_cols = skytools.get_table_columns(dst_curs, self.table_name) field_list = [] for f in self.pkey_list: field_list.append(f) for f in src_cols: if f in self.pkey_list: continue if f in dst_cols: field_list.append(f) self.common_fields = field_list fqlist = [skytools.quote_ident(col) for col in field_list] tbl_expr = "select %s from %s" % (",".join(fqlist), self.fq_table_name) if where: tbl_expr += ' where ' + where tbl_expr = "COPY (%s) TO STDOUT" % tbl_expr self.log.debug("using copy expr: %s" % tbl_expr) return tbl_expr def dump_table(self, copy_cmd, curs, fn): """Dump table to disk.""" f = open(fn, "w", 64*1024) curs.copy_expert(copy_cmd, f) self.log.info('%s: Got %d bytes' % (self.table_name, f.tell())) f.close() def get_row(self, ln): """Parse a row into dict.""" if not ln: return None t = ln[:-1].split('\t') row = {} for i in range(len(self.common_fields)): row[self.common_fields[i]] = t[i] return row def dump_compare(self, src_fn, dst_fn, fix): """Dump + compare single table.""" self.log.info("Comparing dumps: %s" % 
self.table_name) f1 = open(src_fn, "r", 64*1024) f2 = open(dst_fn, "r", 64*1024) src_ln = f1.readline() dst_ln = f2.readline() if src_ln: self.total_src += 1 if dst_ln: self.total_dst += 1 if os.path.isfile(fix): os.unlink(fix) while src_ln or dst_ln: keep_src = keep_dst = 0 if src_ln != dst_ln: src_row = self.get_row(src_ln) dst_row = self.get_row(dst_ln) diff = self.cmp_keys(src_row, dst_row) if diff > 0: # src > dst self.got_missed_delete(dst_row, fix) keep_src = 1 elif diff < 0: # src < dst self.got_missed_insert(src_row, fix) keep_dst = 1 else: if self.cmp_data(src_row, dst_row) != 0: self.got_missed_update(src_row, dst_row, fix) if not keep_src: src_ln = f1.readline() if src_ln: self.total_src += 1 if not keep_dst: dst_ln = f2.readline() if dst_ln: self.total_dst += 1 self.log.info("finished %s: src: %d rows, dst: %d rows,"\ " missed: %d inserts, %d updates, %d deletes" % ( self.table_name, self.total_src, self.total_dst, self.cnt_insert, self.cnt_update, self.cnt_delete)) def got_missed_insert(self, src_row, fn): """Create sql for missed insert.""" self.cnt_insert += 1 fld_list = self.common_fields fq_list = [] val_list = [] for f in fld_list: fq_list.append(skytools.quote_ident(f)) v = skytools.unescape_copy(src_row[f]) val_list.append(skytools.quote_literal(v)) q = "insert into %s (%s) values (%s);" % ( self.fq_table_name, ", ".join(fq_list), ", ".join(val_list)) self.show_fix(q, 'insert', fn) def got_missed_update(self, src_row, dst_row, fn): """Create sql for missed update.""" self.cnt_update += 1 fld_list = self.common_fields set_list = [] whe_list = [] for f in self.pkey_list: self.addcmp(whe_list, skytools.quote_ident(f), skytools.unescape_copy(src_row[f])) for f in fld_list: v1 = src_row[f] v2 = dst_row[f] if self.cmp_value(v1, v2) == 0: continue self.addeq(set_list, skytools.quote_ident(f), skytools.unescape_copy(v1)) self.addcmp(whe_list, skytools.quote_ident(f), skytools.unescape_copy(v2)) q = "update only %s set %s where %s;" % ( 
self.fq_table_name, ", ".join(set_list), " and ".join(whe_list)) self.show_fix(q, 'update', fn) def got_missed_delete(self, dst_row, fn): """Create sql for missed delete.""" self.cnt_delete += 1 whe_list = [] for f in self.pkey_list: self.addcmp(whe_list, skytools.quote_ident(f), skytools.unescape_copy(dst_row[f])) q = "delete from only %s where %s;" % (self.fq_table_name, " and ".join(whe_list)) self.show_fix(q, 'delete', fn) def show_fix(self, q, desc, fn): """Print/write/apply repair sql.""" self.log.debug("missed %s: %s" % (desc, q)) open(fn, "a").write("%s\n" % q) if self.apply_fixes: self.apply_cursor.execute(q) def addeq(self, list, f, v): """Add quoted SET.""" vq = skytools.quote_literal(v) s = "%s = %s" % (f, vq) list.append(s) def addcmp(self, list, f, v): """Add quoted comparision.""" if v is None: s = "%s is null" % f else: vq = skytools.quote_literal(v) s = "%s = %s" % (f, vq) list.append(s) def cmp_data(self, src_row, dst_row): """Compare data field-by-field.""" for k in self.common_fields: v1 = src_row[k] v2 = dst_row[k] if self.cmp_value(v1, v2) != 0: return -1 return 0 def cmp_value(self, v1, v2): """Compare single field, tolerates tz vs notz dates.""" if v1 == v2: return 0 # try to work around tz vs. notz z1 = len(v1) z2 = len(v2) if z1 == z2 + 3 and z2 >= 19 and v1[z2] == '+': v1 = v1[:-3] if v1 == v2: return 0 elif z1 + 3 == z2 and z1 >= 19 and v2[z1] == '+': v2 = v2[:-3] if v1 == v2: return 0 return -1 def cmp_keys(self, src_row, dst_row): """Compare primary keys of the rows. Returns 1 if src > dst, -1 if src < dst and 0 if src == dst""" # None means table is done. tag it larger than any existing row. 
if src_row is None: if dst_row is None: return 0 return 1 elif dst_row is None: return -1 for k in self.pkey_list: v1 = src_row[k] v2 = dst_row[k] if v1 < v2: return -1 elif v1 > v2: return 1 return 0 class Syncer(skytools.DBScript): """Checks that tables in two databases are in sync.""" lock_timeout = 10 ticker_lag_limit = 20 consumer_lag_limit = 20 def sync_table(self, cstr1, cstr2, queue_name, consumer_name, table_name): """Syncer main function. Returns (src_db, dst_db) that are in transaction where table should be in sync. """ setup_db = self.get_database('setup_db', connstr = cstr1, autocommit = 1) lock_db = self.get_database('lock_db', connstr = cstr1) src_db = self.get_database('src_db', connstr = cstr1, isolation_level = skytools.I_REPEATABLE_READ) dst_db = self.get_database('dst_db', connstr = cstr2, isolation_level = skytools.I_REPEATABLE_READ) lock_curs = lock_db.cursor() setup_curs = setup_db.cursor() src_curs = src_db.cursor() dst_curs = dst_db.cursor() self.check_consumer(setup_curs, queue_name, consumer_name) # lock table in separate connection self.log.info('Locking %s' % table_name) self.set_lock_timeout(lock_curs) lock_time = time.time() lock_curs.execute("LOCK TABLE %s IN SHARE MODE" % skytools.quote_fqident(table_name)) # now wait until consumer has updated target table until locking self.log.info('Syncing %s' % table_name) # consumer must get further than this tick tick_id = self.force_tick(setup_curs, queue_name) # try to force second tick also self.force_tick(setup_curs, queue_name) # take server time setup_curs.execute("select to_char(now(), 'YYYY-MM-DD HH24:MI:SS.MS')") tpos = setup_curs.fetchone()[0] # now wait while 1: time.sleep(0.5) q = "select now() - lag > timestamp %s, now(), lag from pgq.get_consumer_info(%s, %s)" setup_curs.execute(q, [tpos, queue_name, consumer_name]) res = setup_curs.fetchall() if len(res) == 0: raise Exception('No such consumer: %s/%s' % (queue_name, consumer_name)) row = res[0] self.log.debug("tpos=%s now=%s 
lag=%s ok=%s" % (tpos, row[1], row[2], row[0])) if row[0]: break # limit lock time if time.time() > lock_time + self.lock_timeout: self.log.error('Consumer lagging too much, exiting') lock_db.rollback() sys.exit(1) # take snapshot on provider side src_db.commit() src_curs.execute("SELECT 1") # take snapshot on subscriber side dst_db.commit() dst_curs.execute("SELECT 1") # release lock lock_db.commit() self.close_database('setup_db') self.close_database('lock_db') return (src_db, dst_db) def set_lock_timeout(self, curs): ms = int(1000 * self.lock_timeout) if ms > 0: q = "SET LOCAL statement_timeout = %d" % ms self.log.debug(q) curs.execute(q) def check_consumer(self, curs, queue_name, consumer_name): """ Before locking anything check if consumer is working ok. """ self.log.info("Queue: %s Consumer: %s" % (queue_name, consumer_name)) curs.execute('select current_database()') self.log.info('Actual db: %s' % curs.fetchone()[0]) # get ticker lag q = "select extract(epoch from ticker_lag) from pgq.get_queue_info(%s);" curs.execute(q, [queue_name]) ticker_lag = curs.fetchone()[0] self.log.info("Ticker lag: %s" % ticker_lag) # get consumer lag q = "select extract(epoch from lag) from pgq.get_consumer_info(%s, %s);" curs.execute(q, [queue_name, consumer_name]) res = curs.fetchall() if len(res) == 0: self.log.error('check_consumer: No such consumer: %s/%s' % (queue_name, consumer_name)) sys.exit(1) consumer_lag = res[0][0] # check that lag is acceptable self.log.info("Consumer lag: %s" % consumer_lag) if consumer_lag > ticker_lag + 10: self.log.error('Consumer lagging too much, cannot proceed') sys.exit(1) def force_tick(self, curs, queue_name): """ Force tick into source queue so that consumer can move on faster """ q = "select pgq.force_tick(%s)" curs.execute(q, [queue_name]) res = curs.fetchone() cur_pos = res[0] start = time.time() while 1: time.sleep(0.5) curs.execute(q, [queue_name]) res = curs.fetchone() if res[0] != cur_pos: # new pos return res[0] # dont loop more 
than 10 secs dur = time.time() - start if dur > 10 and not self.options.force: raise Exception("Ticker seems dead") class Checker(Syncer): """Checks that tables in two databases are in sync. Config options:: ## data_checker ## confdb = dbname=confdb host=confdb.service extra_connstr = user=marko # one of: compare, repair, repair-apply, compare-repair-apply check_type = compare # random params used in queries cluster_name = instance_name = proxy_host = proxy_db = # list of tables to be compared table_list = foo, bar, baz where_expr = (hashtext(key_user_name) & %%(max_slot)s) in (%%(slots)s) # gets no args source_query = select h.hostname, d.db_name from dba.cluster c join dba.cluster_host ch on (ch.key_cluster = c.id_cluster) join conf.host h on (h.id_host = ch.key_host) join dba.database d on (d.key_host = ch.key_host) where c.db_name = '%(cluster_name)s' and c.instance_name = '%(instance_name)s' and d.mk_db_type = 'partition' and d.mk_db_status = 'active' order by d.db_name, h.hostname target_query = select db_name, hostname, slots, max_slot from dba.get_cross_targets(%%(hostname)s, %%(db_name)s, '%(proxy_host)s', '%(proxy_db)s') consumer_query = select q.queue_name, c.consumer_name from conf.host h join dba.database d on (d.key_host = h.id_host) join dba.pgq_queue q on (q.key_database = d.id_database) join dba.pgq_consumer c on (c.key_queue = q.id_queue) where h.hostname = %%(hostname)s and d.db_name = %%(db_name)s and q.queue_name like 'xm%%%%' """ def __init__(self, args): """Checker init.""" Syncer.__init__(self, 'data_checker', args) self.set_single_loop(1) self.log.info('Checker starting %s' % str(args)) self.lock_timeout = self.cf.getfloat('lock_timeout', 10) self.table_list = self.cf.getlist('table_list') def work(self): """Syncer main function.""" source_query = self.cf.get('source_query') target_query = self.cf.get('target_query') consumer_query = self.cf.get('consumer_query') where_expr = self.cf.get('where_expr') extra_connstr = 
self.cf.get('extra_connstr') check = self.cf.get('check_type', 'compare') confdb = self.get_database('confdb', autocommit=1) curs = confdb.cursor() curs.execute(source_query) for src_row in curs.fetchall(): s_host = src_row['hostname'] s_db = src_row['db_name'] curs.execute(consumer_query, src_row) r = curs.fetchone() consumer_name = r['consumer_name'] queue_name = r['queue_name'] curs.execute(target_query, src_row) for dst_row in curs.fetchall(): d_db = dst_row['db_name'] d_host = dst_row['hostname'] cstr1 = "dbname=%s host=%s %s" % (s_db, s_host, extra_connstr) cstr2 = "dbname=%s host=%s %s" % (d_db, d_host, extra_connstr) where = where_expr % dst_row self.log.info('Source: db=%s host=%s queue=%s consumer=%s' % ( s_db, s_host, queue_name, consumer_name)) self.log.info('Target: db=%s host=%s where=%s' % (d_db, d_host, where)) for tbl in self.table_list: src_db, dst_db = self.sync_table(cstr1, cstr2, queue_name, consumer_name, tbl) if check == 'compare': self.do_compare(tbl, src_db, dst_db, where) elif check == 'repair': r = TableRepair(tbl, self.log) r.do_repair(src_db, dst_db, where, 'fix.' + tbl, False) elif check == 'repair-apply': r = TableRepair(tbl, self.log) r.do_repair(src_db, dst_db, where, 'fix.' + tbl, True) elif check == 'compare-repair-apply': ok = self.do_compare(tbl, src_db, dst_db, where) if not ok: r = TableRepair(tbl, self.log) r.do_repair(src_db, dst_db, where, 'fix.' 
+ tbl, True) else: raise Exception('unknown check type') self.reset() def do_compare(self, tbl, src_db, dst_db, where): """Actual comparision.""" src_curs = src_db.cursor() dst_curs = dst_db.cursor() self.log.info('Counting %s' % tbl) q = "select count(1) as cnt, sum(hashtext(t.*::text)) as chksum from only _TABLE_ t where %s;" % where q = self.cf.get('compare_sql', q) q = q.replace('_TABLE_', skytools.quote_fqident(tbl)) f = "%(cnt)d rows, checksum=%(chksum)s" f = self.cf.get('compare_fmt', f) self.log.debug("srcdb: " + q) src_curs.execute(q) src_row = src_curs.fetchone() src_str = f % src_row self.log.info("srcdb: %s" % src_str) self.log.debug("dstdb: " + q) dst_curs.execute(q) dst_row = dst_curs.fetchone() dst_str = f % dst_row self.log.info("dstdb: %s" % dst_str) src_db.commit() dst_db.commit() if src_str != dst_str: self.log.warning("%s: Results do not match!" % tbl) return False else: self.log.info("%s: OK!" % tbl) return True if __name__ == '__main__': script = Checker(sys.argv[1:]) script.start() skytools-3.2.6/python/skytools/parsing.py0000644000000000000000000004004412426435645015541 0ustar """Various parsers for Postgres-specific data formats.""" import re import skytools __all__ = [ "parse_pgarray", "parse_logtriga_sql", "parse_tabbed_table", "parse_statements", 'sql_tokenizer', 'parse_sqltriga_sql', "parse_acl", "dedent", "hsize_to_bytes", "parse_connect_string", "merge_connect_string"] _rc_listelem = re.compile(r'( [^,"}]+ | ["] ( [^"\\]+ | [\\]. )* ["] )', re.X) def parse_pgarray(array): r"""Parse Postgres array and return list of items inside it. 
class _logtriga_parser:
    """Parses logtriga/sqltriga partial SQL to values.

    Tokens are consumed from a generator; running out of tokens raises
    StopIteration, which parse_sql() treats as the *normal* successful
    end of the per-operation sub-parsers.  Reaching the end of a
    sub-parser without exhausting the tokenizer is a syntax error.
    """
    def tokenizer(self, sql):
        """Token generator: yields token text only, whitespace skipped."""
        for typ, tok in sql_tokenizer(sql, ignore_whitespace = True):
            yield tok

    def parse_insert(self, tk, fields, values, key_fields, key_values):
        """Handler for inserts.

        Grammar: (col1, col2) values ('data', null)
        Column names go to *fields*, literals to *values*;
        key_fields/key_values stay empty for inserts.
        """
        # column list: "(" name ("," name)* ")"
        if tk.next() != "(":
            raise Exception("syntax error")
        while 1:
            fields.append(tk.next())
            t = tk.next()
            if t == ")":
                break
            elif t != ",":
                raise Exception("syntax error")
        if tk.next().lower() != "values":
            raise Exception("syntax error, expected VALUES")
        if tk.next() != "(":
            raise Exception("syntax error, expected (")
        # value list: "(" literal ("," literal)* ")"
        while 1:
            values.append(tk.next())
            t = tk.next()
            if t == ")":
                break
            if t == ",":
                continue
            raise Exception("expected , or ) got "+t)
        # success path: this next() must raise StopIteration (input ends
        # right after the value list); any leftover token is garbage
        t = tk.next()
        raise Exception("expected EOF, got " + repr(t))

    def parse_update(self, tk, fields, values, key_fields, key_values):
        """Handler for updates.

        Grammar: col1 = 'data1', col2 = null where pk1 = 'pk1' and pk2 = 'pk2'
        SET pairs go to fields/values, WHERE pairs to key_fields/key_values.
        """
        # SET clause: name "=" literal ("," name "=" literal)* "where"
        while 1:
            fields.append(tk.next())
            if tk.next() != "=":
                raise Exception("syntax error")
            values.append(tk.next())
            t = tk.next()
            if t == ",":
                continue
            elif t.lower() == "where":
                break
            else:
                raise Exception("syntax error, expected WHERE or , got "+repr(t))
        # WHERE clause: name "=" literal ("and" name "=" literal)*;
        # StopIteration after the last pair ends the loop successfully
        while 1:
            fld = tk.next()
            key_fields.append(fld)
            self.pklist.append(fld)
            if tk.next() != "=":
                raise Exception("syntax error")
            key_values.append(tk.next())
            t = tk.next()
            if t.lower() != "and":
                raise Exception("syntax error, expected AND got "+repr(t))

    def parse_delete(self, tk, fields, values, key_fields, key_values):
        """Handler for deletes.

        Grammar: pk1 = 'pk1' and pk2 = 'pk2'
        Only key_fields/key_values are filled; StopIteration after the
        last pair is the normal exit.
        """
        while 1:
            fld = tk.next()
            key_fields.append(fld)
            self.pklist.append(fld)
            if tk.next() != "=":
                raise Exception("syntax error")
            key_values.append(tk.next())
            t = tk.next()
            if t.lower() != "and":
                raise Exception("syntax error, expected AND, got "+repr(t))

    def _create_dbdict(self, fields, values):
        # unquote SQL identifiers/literals and zip into a dbdict
        fields = [skytools.unquote_ident(f) for f in fields]
        values = [skytools.unquote_literal(f) for f in values]
        return skytools.dbdict(zip(fields, values))

    def parse_sql(self, op, sql, pklist=None, splitkeys=False):
        """Main entry point.

        @param op: operation - "I", "U" or "D"
        @param sql: partial SQL produced by logtriga/sqltriga
        @param pklist: optional list that collects pkey column names
        @param splitkeys: if true return (key_dict, data_dict),
            otherwise a single merged dict
        """
        if pklist is None:
            self.pklist = []
        else:
            self.pklist = pklist
        tk = self.tokenizer(sql)
        fields = []
        values = []
        key_fields = []
        key_values = []
        try:
            if op == "I":
                self.parse_insert(tk, fields, values, key_fields, key_values)
            elif op == "U":
                self.parse_update(tk, fields, values, key_fields, key_values)
            elif op == "D":
                self.parse_delete(tk, fields, values, key_fields, key_values)
            # sub-parsers exit via StopIteration; falling through here
            # means the tokenizer was not exhausted (or op was unknown)
            raise Exception("syntax error")
        except StopIteration:
            # last sanity check
            if (len(fields) + len(key_fields) == 0
                    or len(fields) != len(values)
                    or len(key_fields) != len(key_values)):
                raise Exception("syntax error, fields do not match values")
        if splitkeys:
            return (self._create_dbdict(key_fields, key_values),
                    self._create_dbdict(fields, values))
        return self._create_dbdict(fields + key_fields, values + key_values)
Example: >>> parse_tabbed_table('col1\tcol2\nval1\tval2\n') [{'col2': 'val2', 'col1': 'val1'}] """ txt = txt.replace("\r\n", "\n") fields = None data = [] for ln in txt.split("\n"): if not ln: continue if not fields: fields = ln.split("\t") continue cols = ln.split("\t") if len(cols) != len(fields): continue row = dict(zip(fields, cols)) data.append(row) return data _extstr = r""" ['] (?: [^'\\]+ | \\. | [']['] )* ['] """ _stdstr = r""" ['] (?: [^']+ | [']['] )* ['] """ _name = r""" (?: [a-z_][a-z0-9_$]* | " (?: [^"]+ | "" )* " ) """ _ident = r""" (?P %s ) """ % _name _fqident = r""" (?P %s (?: \. %s )* ) """ % (_name, _name) _base_sql = r""" (?P (?P [$] (?: [_a-z][_a-z0-9]*)? [$] ) .*? (?P=dname) ) | (?P [0-9][0-9.e]* ) | (?P [$] [0-9]+ ) | (?P [%][(] [a-z_][a-z0-9_]* [)] [s] ) | (?P [{] [^{}]+ [}] ) | (?P (?: \s+ | [/][*] .*? [*][/] | [-][-][^\n]* )+ ) | (?P (?: [-+*~!@#^&|?/%<>=]+ | [,()\[\].:;] ) ) | (?P . )""" _base_sql_fq = r"%s | %s" % (_fqident, _base_sql) _base_sql = r"%s | %s" % (_ident, _base_sql) _std_sql = r"""(?: (?P [E] %s | %s ) | %s )""" % (_extstr, _stdstr, _base_sql) _std_sql_fq = r"""(?: (?P [E] %s | %s ) | %s )""" % (_extstr, _stdstr, _base_sql_fq) _ext_sql = r"""(?: (?P [E]? %s ) | %s )""" % (_extstr, _base_sql) _ext_sql_fq = r"""(?: (?P [E]? %s ) | %s )""" % (_extstr, _base_sql_fq) _std_sql_rc = _ext_sql_rc = None _std_sql_fq_rc = _ext_sql_fq_rc = None def sql_tokenizer(sql, standard_quoting = False, ignore_whitespace = False, fqident = False, show_location = False): r"""Parser SQL to tokens. Iterator, returns (toktype, tokstr) tuples. 
Example >>> [x for x in sql_tokenizer("select * from a.b", ignore_whitespace=True)] [('ident', 'select'), ('sym', '*'), ('ident', 'from'), ('ident', 'a'), ('sym', '.'), ('ident', 'b')] >>> [x for x in sql_tokenizer("\"c olumn\",'str''val'")] [('ident', '"c olumn"'), ('sym', ','), ('str', "'str''val'")] >>> list(sql_tokenizer('a.b a."b "" c" a.1', fqident=True, ignore_whitespace=True)) [('ident', 'a.b'), ('ident', 'a."b "" c"'), ('ident', 'a'), ('sym', '.'), ('num', '1')] """ global _std_sql_rc, _ext_sql_rc, _std_sql_fq_rc, _ext_sql_fq_rc if not _std_sql_rc: _std_sql_rc = re.compile(_std_sql, re.X | re.I | re.S) _ext_sql_rc = re.compile(_ext_sql, re.X | re.I | re.S) _std_sql_fq_rc = re.compile(_std_sql_fq, re.X | re.I | re.S) _ext_sql_fq_rc = re.compile(_ext_sql_fq, re.X | re.I | re.S) if standard_quoting: if fqident: rc = _std_sql_fq_rc else: rc = _std_sql_rc else: if fqident: rc = _ext_sql_fq_rc else: rc = _ext_sql_rc pos = 0 while 1: m = rc.match(sql, pos) if not m: break pos = m.end() typ = m.lastgroup if ignore_whitespace and typ == "ws": continue tk = m.group() if show_location: yield (typ, tk, pos) else: yield (typ, tk) _copy_from_stdin_re = "copy.*from\s+stdin" _copy_from_stdin_rc = None def parse_statements(sql, standard_quoting = False): """Parse multi-statement string into separate statements. Returns list of statements. 
>>> [sql for sql in parse_statements("begin; select 1; select 'foo'; end;")] ['begin;', 'select 1;', "select 'foo';", 'end;'] """ global _copy_from_stdin_rc if not _copy_from_stdin_rc: _copy_from_stdin_rc = re.compile(_copy_from_stdin_re, re.X | re.I) tokens = [] pcount = 0 # '(' level for typ, t in sql_tokenizer(sql, standard_quoting = standard_quoting): # skip whitespace and comments before statement if len(tokens) == 0 and typ == "ws": continue # keep the rest tokens.append(t) if t == "(": pcount += 1 elif t == ")": pcount -= 1 elif t == ";" and pcount == 0: sql = "".join(tokens) if _copy_from_stdin_rc.match(sql): raise Exception("copy from stdin not supported") yield ("".join(tokens)) tokens = [] if len(tokens) > 0: yield ("".join(tokens)) if pcount != 0: raise Exception("syntax error - unbalanced parenthesis") _acl_name = r'(?: [0-9a-z_]+ | " (?: [^"]+ | "" )* " )' _acl_re = r''' \s* (?: group \s+ | user \s+ )? (?P %s )? (?P = [a-z*]* )? (?P / %s )? \s* $ ''' % (_acl_name, _acl_name) _acl_rc = None def parse_acl(acl): """Parse ACL entry. >>> parse_acl('user=rwx/owner') ('user', 'rwx', 'owner') >>> parse_acl('" ""user"=rwx/" ""owner"') (' "user', 'rwx', ' "owner') >>> parse_acl('user=rwx') ('user', 'rwx', None) >>> parse_acl('=/f') (None, '', 'f') """ global _acl_rc if not _acl_rc: _acl_rc = re.compile(_acl_re, re.I | re.X) m = _acl_rc.match(acl) if not m: return None target = m.group('tgt') perm = m.group('perm') owner = m.group('owner') if target: target = skytools.unquote_ident(target) if perm: perm = perm[1:] if owner: owner = skytools.unquote_ident(owner[1:]) return (target, perm, owner) def dedent(doc): r"""Relaxed dedent. - takes whitespace to be removed from first indented line. 
- allows empty or non-indented lines at the start - allows first line to be unindented - skips empty lines at the start - ignores indent of empty lines - if line does not match common indent, is stays unchanged >>> dedent(' Line1:\n Line 2\n') 'Line1:\n Line 2\n' >>> dedent(' \nLine1:\n Line 2\n Line 3\n Line 4') 'Line1:\nLine 2\n Line 3\n Line 4\n' """ pfx = None res = [] for ln in doc.splitlines(): ln = ln.rstrip() if not pfx and len(res) < 2: if not ln: continue wslen = len(ln) - len(ln.lstrip()) pfx = ln[ : wslen] if pfx: if ln.startswith(pfx): ln = ln[ len(pfx) : ] res.append(ln) res.append('') return '\n'.join(res) def hsize_to_bytes (input): """ Convert sizes from human format to bytes (string to integer) """ assert isinstance (input, str) m = re.match (r"^([0-9]+) *([KMGTPEZY]?)B?$", input.strip(), re.IGNORECASE) if not m: raise ValueError ("cannot parse: %s" % input) units = ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'] bytes = int(m.group(1)) * 1024 ** units.index(m.group(2).upper()) return bytes # # Connect string parsing # _cstr_rx = r""" \s* (\w+) \s* = \s* ( ' ( \\.| [^'\\] )* ' | \S+ ) \s* """ _cstr_unesc_rx = r"\\(.)" _cstr_badval_rx = r"[\s'\\]" _cstr_rc = None _cstr_unesc_rc = None _cstr_badval_rc = None def parse_connect_string(cstr): r"""Parse Postgres connect string. >>> parse_connect_string("host=foo") [('host', 'foo')] >>> parse_connect_string(r" host = foo password = ' f\\\o\'o ' ") [('host', 'foo'), ('password', "' f\\o'o '")] """ global _cstr_rc, _cstr_unesc_rc if not _cstr_rc: _cstr_rc = re.compile(_cstr_rx, re.X) _cstr_unesc_rc = re.compile(_cstr_unesc_rx) pos = 0 res = [] while pos < len(cstr): m = _cstr_rc.match(cstr, pos) if not m: raise ValueError('Invalid connect string') pos = m.end() k = m.group(1) v = m.group(2) if v[0] == "'": v = _cstr_unesc_rc.sub(r"\1", v) res.append( (k,v) ) return res def merge_connect_string(cstr_arg_list): """Put fragments back together. 
>>> merge_connect_string([('host', 'ip'), ('pass', ''), ('x', ' ')]) "host=ip pass='' x=' '" """ global _cstr_badval_rc if not _cstr_badval_rc: _cstr_badval_rc = re.compile(_cstr_badval_rx) buf = [] for k, v in cstr_arg_list: if not v or _cstr_badval_rc.search(v): v = v.replace('\\', r'\\') v = v.replace("'", r"\'") v = "'" + v + "'" buf.append("%s=%s" % (k, v)) return ' '.join(buf) if __name__ == '__main__': import doctest doctest.testmod() skytools-3.2.6/python/skytools/utf8.py0000644000000000000000000000505412426435645014766 0ustar r"""UTF-8 sanitizer. Python's UTF-8 parser is quite relaxed, this creates problems when talking with other software that uses stricter parsers. >>> safe_utf8_decode("foobar") (True, u'foobar') >>> safe_utf8_decode('X\xed\xa0\x80Y\xed\xb0\x89Z') (False, u'X\ufffdY\ufffdZ') >>> safe_utf8_decode('X\xed\xa0\x80\xed\xb0\x89Z') (False, u'X\U00010009Z') >>> safe_utf8_decode('X\0Z') (False, u'X\ufffdZ') >>> safe_utf8_decode('OK') (True, u'OK') >>> safe_utf8_decode('X\xF1Y') (False, u'X\ufffdY') """ import re, codecs __all__ = ['safe_utf8_decode'] # by default, use same symbol as 'replace' REPLACEMENT_SYMBOL = unichr(0xFFFD) def _fix_utf8(m): """Merge UTF16 surrogates, replace others""" u = m.group() if len(u) == 2: # merge into single symbol c1 = ord(u[0]) c2 = ord(u[1]) c = 0x10000 + ((c1 & 0x3FF) << 10) + (c2 & 0x3FF) return unichr(c) else: # use replacement symbol return REPLACEMENT_SYMBOL _urc = None def sanitize_unicode(u): """Fix invalid symbols in unicode string.""" global _urc assert isinstance(u, unicode) # regex for finding invalid chars, works on unicode string if not _urc: rx = u"[\uD800-\uDBFF] [\uDC00-\uDFFF]? | [\0\uDC00-\uDFFF]" _urc = re.compile(rx, re.X) # now find and fix UTF16 surrogates m = _urc.search(u) if m: u = _urc.sub(_fix_utf8, u) return u def safe_replace(exc): """Replace only one symbol at a time. Builtin .decode('xxx', 'replace') replaces several symbols together, which is unsafe. 
""" if not isinstance(exc, UnicodeDecodeError): raise exc c2 = REPLACEMENT_SYMBOL # we could assume latin1 if 0: c1 = exc.object[exc.start] c2 = unichr(ord(c1)) return c2, exc.start + 1 # register, it will be globally available codecs.register_error("safe_replace", safe_replace) def safe_utf8_decode(s): """Decode UTF-8 safely. Acts like str.decode('utf8', 'replace') but also fixes UTF16 surrogates and NUL bytes, which Python's default decoder does not do. @param s: utf8-encoded byte string @return: tuple of (was_valid_utf8, unicode_string) """ # decode with error detection ok = True try: # expect no errors by default u = s.decode('utf8') except UnicodeDecodeError: u = s.decode('utf8', 'safe_replace') ok = False u2 = sanitize_unicode(u) if u is not u2: ok = False return (ok, u2) if __name__ == '__main__': import doctest doctest.testmod() skytools-3.2.6/python/skytools/tnetstrings.py0000644000000000000000000000562612426435645016471 0ustar # Note this implementation is more strict than necessary to demonstrate # minimum restrictions on types allowed in dictionaries. def dump(data): if type(data) is long or type(data) is int: out = str(data) return '%d:%s#' % (len(out), out) elif type(data) is float: out = '%f' % data return '%d:%s^' % (len(out), out) elif type(data) is str: return '%d:' % len(data) + data + ',' elif type(data) is dict: return dump_dict(data) elif type(data) is list: return dump_list(data) elif data == None: return '0:~' elif type(data) is bool: out = repr(data).lower() return '%d:%s!' % (len(out), out) else: assert False, "Can't serialize stuff that's %s." 
% type(data) def parse(data): payload, payload_type, remain = parse_payload(data) if payload_type == '#': value = int(payload) elif payload_type == '}': value = parse_dict(payload) elif payload_type == ']': value = parse_list(payload) elif payload_type == '!': value = payload == 'true' elif payload_type == '^': value = float(payload) elif payload_type == '~': assert len(payload) == 0, "Payload must be 0 length for null." value = None elif payload_type == ',': value = payload else: assert False, "Invalid payload type: %r" % payload_type return value, remain def parse_payload(data): assert data, "Invalid data to parse, it's empty." length, extra = data.split(':', 1) length = int(length) payload, extra = extra[:length], extra[length:] assert extra, "No payload type: %r, %r" % (payload, extra) payload_type, remain = extra[0], extra[1:] assert len(payload) == length, "Data is wrong length %d vs %d" % (length, len(payload)) return payload, payload_type, remain def parse_list(data): if len(data) == 0: return [] result = [] value, extra = parse(data) result.append(value) while extra: value, extra = parse(extra) result.append(value) return result def parse_pair(data): key, extra = parse(data) assert extra, "Unbalanced dictionary store." value, extra = parse(extra) return key, value, extra def parse_dict(data): if len(data) == 0: return {} key, value, extra = parse_pair(data) assert type(key) is str, "Keys can only be strings." result = {key: value} while extra: key, value, extra = parse_pair(extra) result[key] = value return result def dump_dict(data): result = [] for k,v in data.items(): result.append(dump(str(k))) result.append(dump(v)) payload = ''.join(result) return '%d:' % len(payload) + payload + '}' def dump_list(data): result = [] for i in data: result.append(dump(i)) payload = ''.join(result) return '%d:' % len(payload) + payload + ']' skytools-3.2.6/python/skytools/querybuilder.py0000755000000000000000000003233512426435645016621 0ustar #! 
/usr/bin/env python """Helper classes for complex query generation. Main target is code execution under PL/Python. Query parameters are referenced as C{{key}} or C{{key:type}}. Type will be given to C{plpy.prepare}. If C{type} is missing, C{text} is assumed. See L{plpy_exec} for examples. """ import skytools __all__ = [ 'QueryBuilder', 'PLPyQueryBuilder', 'PLPyQuery', 'plpy_exec', "run_query", "run_query_row", "run_lookup", "run_exists", ] # make plpy available try: import plpy except ImportError: pass PARAM_INLINE = 0 # quote_literal() PARAM_DBAPI = 1 # %()s PARAM_PLPY = 2 # $n class QArgConf: """Per-query arg-type config object.""" param_type = None class QArg: """Place-holder for a query parameter.""" def __init__(self, name, value, pos, conf): self.name = name self.value = value self.pos = pos self.conf = conf def __str__(self): if self.conf.param_type == PARAM_INLINE: return skytools.quote_literal(self.value) elif self.conf.param_type == PARAM_DBAPI: return "%s" elif self.conf.param_type == PARAM_PLPY: return "$%d" % self.pos else: raise Exception("bad QArgConf.param_type") # need an structure with fast remove-from-middle # and append operations. 
class DList: """Simple double-linked list.""" def __init__(self): self.next = self self.prev = self def append(self, obj): obj.next = self obj.prev = self.prev self.prev.next = obj self.prev = obj def remove(self, obj): obj.next.prev = obj.prev obj.prev.next = obj.next obj.next = obj.prev = None def empty(self): return self.next == self def pop(self): """Remove and return first element.""" obj = None if not self.empty(): obj = self.next self.remove(obj) return obj class CachedPlan: """Wrapper around prepared plan.""" def __init__(self, key, plan): self.key = key # (sql, (types)) self.plan = plan class PlanCache: """Cache for limited amount of plans.""" def __init__(self, maxplans = 100): self.maxplans = maxplans self.plan_map = {} self.plan_list = DList() def get_plan(self, sql, types): """Prepare the plan and cache it.""" t = (sql, tuple(types)) if t in self.plan_map: pc = self.plan_map[t] # put to the end self.plan_list.remove(pc) self.plan_list.append(pc) return pc.plan # prepare new plan plan = plpy.prepare(sql, types) # add to cache pc = CachedPlan(t, plan) self.plan_list.append(pc) self.plan_map[t] = pc # remove plans if too much while len(self.plan_map) > self.maxplans: pc = self.plan_list.pop() del self.plan_map[pc.key] return plan class QueryBuilder: """Helper for query building. >>> args = {'success': 't', 'total': 45, 'ccy': 'EEK', 'id': 556} >>> q = QueryBuilder("update orders set total = {total} where id = {id}", args) >>> q.add(" and optional = {non_exist}") >>> q.add(" and final = {success}") >>> print q.get_sql(PARAM_INLINE) update orders set total = '45' where id = '556' and final = 't' >>> print q.get_sql(PARAM_DBAPI) update orders set total = %s where id = %s and final = %s >>> print q.get_sql(PARAM_PLPY) update orders set total = $1 where id = $2 and final = $3 """ def __init__(self, sqlexpr, params): """Init the object. @param sqlexpr: Partial sql fragment. @param params: Dict of parameter values. 
""" self._params = params self._arg_type_list = [] self._arg_value_list = [] self._sql_parts = [] self._arg_conf = QArgConf() self._nargs = 0 if sqlexpr: self.add(sqlexpr, required = True) def add(self, expr, type = "text", required = False): """Add SQL fragment to query. """ self._add_expr('', expr, self._params, type, required) def get_sql(self, param_type = PARAM_INLINE): """Return generated SQL (thus far) as string. Possible values for param_type: - 0: Insert values quoted with quote_literal() - 1: Insert %()s in place of parameters. - 2: Insert $n in place of parameters. """ self._arg_conf.param_type = param_type tmp = map(str, self._sql_parts) return "".join(tmp) def _add_expr(self, pfx, expr, params, type, required): parts = [] types = [] values = [] nargs = self._nargs if pfx: parts.append(pfx) pos = 0 while 1: # find start of next argument a1 = expr.find('{', pos) if a1 < 0: parts.append(expr[pos:]) break # find end end of argument name a2 = expr.find('}', a1) if a2 < 0: raise Exception("missing argument terminator: "+expr) # add plain sql if a1 > pos: parts.append(expr[pos:a1]) pos = a2 + 1 # get arg name, check if exists k = expr[a1 + 1 : a2] # split name from type tpos = k.rfind(':') if tpos > 0: kparam = k[:tpos] ktype = k[tpos+1 : ] else: kparam = k ktype = type # params==None means params are checked later if params is not None and kparam not in params: if required: raise Exception("required parameter missing: "+kparam) # optional fragment, param missing, skip it return # got arg nargs += 1 if params is not None: val = params[kparam] else: val = kparam values.append(val) types.append(ktype) arg = QArg(kparam, val, nargs, self._arg_conf) parts.append(arg) # add interesting parts to the main sql self._sql_parts.extend(parts) if types: self._arg_type_list.extend(types) if values: self._arg_value_list.extend(values) self._nargs = nargs def execute(self, curs): """Client-side query execution on DB-API 2.0 cursor. 
Calls C{curs.execute()} with proper arguments. Returns result of curs.execute(), although that does not return anything interesting. Later curs.fetch* methods must be called to get result. """ q = self.get_sql(PARAM_DBAPI) args = self._params return curs.execute(q, args) class PLPyQueryBuilder(QueryBuilder): def __init__(self, sqlexpr, params, plan_cache = None, sqls = None): """Init the object. @param sqlexpr: Partial sql fragment. @param params: Dict of parameter values. @param plan_cache: (PL/Python) A dict object where to store the plan cache, under the key C{"plan_cache"}. If not given, plan will not be cached and values will be inserted directly to query. Usually either C{GD} or C{SD} should be given here. @param sqls: list object where to append executed sqls (used for debugging) """ QueryBuilder.__init__(self, sqlexpr, params) self._sqls = sqls if plan_cache is not None: if 'plan_cache' not in plan_cache: plan_cache['plan_cache'] = PlanCache() self._plan_cache = plan_cache['plan_cache'] else: self._plan_cache = None def execute(self): """Server-side query execution via plpy. Query can be run either cached or uncached, depending on C{plan_cache} setting given to L{__init__}. Returns result of plpy.execute(). """ args = self._arg_value_list types = self._arg_type_list if self._sqls is not None: self._sqls.append( { "sql": self.get_sql(PARAM_INLINE) } ) if self._plan_cache is not None: sql = self.get_sql(PARAM_PLPY) plan = self._plan_cache.get_plan(sql, types) res = plpy.execute(plan, args) else: sql = self.get_sql(PARAM_INLINE) res = plpy.execute(sql) if res: res = [skytools.dbdict(r) for r in res] return res class PLPyQuery: """Static, cached PL/Python query that uses QueryBuilder formatting. See L{plpy_exec} for simple usage. 
""" def __init__(self, sql): qb = QueryBuilder(sql, None) p_sql = qb.get_sql(PARAM_PLPY) p_types = qb._arg_type_list self.plan = plpy.prepare(p_sql, p_types) self.arg_map = qb._arg_value_list self.sql = sql def execute(self, arg_dict, all_keys_required = True): try: if all_keys_required: arg_list = [arg_dict[k] for k in self.arg_map] else: arg_list = [arg_dict.get(k) for k in self.arg_map] return plpy.execute(self.plan, arg_list) except KeyError: need = set(self.arg_map) got = set(arg_dict.keys()) missing = list(need.difference(got)) plpy.error("Missing arguments: [%s] QUERY: %s" % ( ','.join(missing), repr(self.sql))) def __repr__(self): return 'PLPyQuery<%s>' % self.sql def plpy_exec(gd, sql, args, all_keys_required = True): """Cached plan execution for PL/Python. @param gd: dict to store cached plans under. If None, caching is disabled. @param sql: SQL statement to execute. @param args: dict of arguments to query. @param all_keys_required: if False, missing key is taken as NULL, instead of throwing error. 
>>> res = plpy_exec(GD, "select {arg1}, {arg2:int4}, {arg1}", {'arg1': '1', 'arg2': '2'}) DBG: plpy.prepare('select $1, $2, $3', ['text', 'int4', 'text']) DBG: plpy.execute(('PLAN', 'select $1, $2, $3', ['text', 'int4', 'text']), ['1', '2', '1']) >>> res = plpy_exec(None, "select {arg1}, {arg2:int4}, {arg1}", {'arg1': '1', 'arg2': '2'}) DBG: plpy.execute("select '1', '2', '1'", []) >>> res = plpy_exec(GD, "select {arg1}, {arg2:int4}, {arg1}", {'arg1': '3', 'arg2': '4'}) DBG: plpy.execute(('PLAN', 'select $1, $2, $3', ['text', 'int4', 'text']), ['3', '4', '3']) >>> res = plpy_exec(GD, "select {arg1}, {arg2:int4}, {arg1}", {'arg1': '3'}) DBG: plpy.error("Missing arguments: [arg2] QUERY: 'select {arg1}, {arg2:int4}, {arg1}'") >>> res = plpy_exec(GD, "select {arg1}, {arg2:int4}, {arg1}", {'arg1': '3'}, False) DBG: plpy.execute(('PLAN', 'select $1, $2, $3', ['text', 'int4', 'text']), ['3', None, '3']) """ if gd is None: return PLPyQueryBuilder(sql, args).execute() try: sq = gd['plq_cache'][sql] except KeyError: if 'plq_cache' not in gd: gd['plq_cache'] = {} sq = PLPyQuery(sql) gd['plq_cache'][sql] = sq return sq.execute(args, all_keys_required) # some helper functions for convenient sql execution def run_query(cur, sql, params = None, **kwargs): """ Helper function if everything you need is just paramertisized execute Sets rows_found that is coneninet to use when you don't need result just want to know how many rows were affected """ params = params or kwargs sql = QueryBuilder(sql, params).get_sql(0) cur.execute(sql) rows = cur.fetchall() # convert result rows to dbdict if rows: rows = [skytools.dbdict(r) for r in rows] return rows def run_query_row(cur, sql, params = None, **kwargs): """ Helper function if everything you need is just paramertisized execute to fetch one row only. 
If not found none is returned """ params = params or kwargs rows = run_query(cur, sql, params) if len(rows) == 0: return None return rows[0] def run_lookup(cur, sql, params = None, **kwargs): """ Helper function to fetch one value Takes away all the hassle of preparing statements and processing returned result giving out just one value. """ params = params or kwargs sql = QueryBuilder(sql, params).get_sql(0) cur.execute(sql) row = cur.fetchone() if row is None: return None return row[0] def run_exists(cur, sql, params = None, **kwargs): """ Helper function to fetch one value Takes away all the hassle of preparing statements and processing returned result giving out just one value. """ params = params or kwargs val = run_lookup(cur, sql, params) return not (val is None) # fake plpy for testing class fake_plpy: def prepare(self, sql, types): print "DBG: plpy.prepare(%s, %s)" % (repr(sql), repr(types)) return ('PLAN', sql, types) def execute(self, plan, args = []): print "DBG: plpy.execute(%s, %s)" % (repr(plan), repr(args)) def error(self, msg): print "DBG: plpy.error(%s)" % repr(msg) # launch doctest if __name__ == '__main__': import doctest plpy = fake_plpy() GD = {} doctest.testmod() skytools-3.2.6/python/skytools/config.py0000644000000000000000000001655712426435645015357 0ustar """Nicer config class.""" import os, os.path, ConfigParser, socket import skytools __all__ = ['Config'] class Config(object): """Bit improved ConfigParser. Additional features: - Remembers section. - Accepts defaults in get() functions. - List value support. """ def __init__(self, main_section, filename, sane_config = 1, user_defs = {}, override = {}, ignore_defs = False): """Initialize Config and read from file. @param sane_config: chooses between ConfigParser/SafeConfigParser. 
""" # use config file name as default job_name if filename: job_name = os.path.splitext(os.path.basename(filename))[0] else: job_name = main_section # initialize defaults, make them usable in config file if ignore_defs: self.defs = {} else: self.defs = { 'job_name': job_name, 'service_name': main_section, 'host_name': socket.gethostname(), } if filename: self.defs['config_dir'] = os.path.dirname(filename) self.defs['config_file'] = filename self.defs.update(user_defs) self.main_section = main_section self.filename = filename self.sane_config = sane_config self.override = override if sane_config: self.cf = ConfigParser.SafeConfigParser() else: self.cf = ConfigParser.ConfigParser() if filename is None: self.cf.add_section(main_section) elif not os.path.isfile(filename): raise Exception('Config file not found: '+filename) self.reload() def reload(self): """Re-reads config file.""" if self.filename: self.cf.read(self.filename) if not self.cf.has_section(self.main_section): raise Exception("Wrong config file, no section '%s'" % self.main_section) # apply default if key not set for k, v in self.defs.items(): if not self.cf.has_option(self.main_section, k): self.cf.set(self.main_section, k, v) # apply overrides if self.override: for k, v in self.override.items(): self.cf.set(self.main_section, k, v) def get(self, key, default=None): """Reads string value, if not set then default.""" try: return self.cf.get(self.main_section, key) except ConfigParser.NoOptionError: if default == None: raise Exception("Config value not set: " + key) return default def getint(self, key, default=None): """Reads int value, if not set then default.""" try: return self.cf.getint(self.main_section, key) except ConfigParser.NoOptionError: if default == None: raise Exception("Config value not set: " + key) return default def getboolean(self, key, default=None): """Reads boolean value, if not set then default.""" try: return self.cf.getboolean(self.main_section, key) except 
ConfigParser.NoOptionError: if default == None: raise Exception("Config value not set: " + key) return default def getfloat(self, key, default=None): """Reads float value, if not set then default.""" try: return self.cf.getfloat(self.main_section, key) except ConfigParser.NoOptionError: if default == None: raise Exception("Config value not set: " + key) return default def getlist(self, key, default=None): """Reads comma-separated list from key.""" try: s = self.cf.get(self.main_section, key).strip() res = [] if not s: return res for v in s.split(","): res.append(v.strip()) return res except ConfigParser.NoOptionError: if default == None: raise Exception("Config value not set: " + key) return default def getdict(self, key, default=None): """Reads key-value dict from parameter. Key and value are separated with ':'. If missing, key itself is taken as value. """ try: s = self.cf.get(self.main_section, key).strip() res = {} if not s: return res for kv in s.split(","): tmp = kv.split(':', 1) if len(tmp) > 1: k = tmp[0].strip() v = tmp[1].strip() else: k = kv.strip() v = k res[k] = v return res except ConfigParser.NoOptionError: if default == None: raise Exception("Config value not set: " + key) return default def getfile(self, key, default=None): """Reads filename from config. In addition to reading string value, expands ~ to user directory. """ fn = self.get(key, default) if fn == "" or fn == "-": return fn # simulate that the cwd is script location #path = os.path.dirname(sys.argv[0]) # seems bad idea, cwd should be cwd fn = os.path.expanduser(fn) return fn def getbytes(self, key, default=None): """Reads a size value in human format, if not set then default. 
Examples: 1, 2 B, 3K, 4 MB """ try: s = self.cf.get(self.main_section, key) except ConfigParser.NoOptionError: if default is None: raise Exception("Config value not set: " + key) s = default return skytools.hsize_to_bytes(s) def get_wildcard(self, key, values=[], default=None): """Reads a wildcard property from conf and returns its string value, if not set then default.""" orig_key = key keys = [key] for wild in values: key = key.replace('*', wild, 1) keys.append(key) keys.reverse() for key in keys: try: return self.cf.get(self.main_section, key) except ConfigParser.NoOptionError: pass if default == None: raise Exception("Config value not set: " + orig_key) return default def sections(self): """Returns list of sections in config file, excluding DEFAULT.""" return self.cf.sections() def has_section(self, section): """Checks if section is present in config file, excluding DEFAULT.""" return self.cf.has_section(section) def clone(self, main_section): """Return new Config() instance with new main section on same config file.""" return Config(main_section, self.filename, self.sane_config) def options(self): """Return list of options in main section.""" return self.cf.options(self.main_section) def has_option(self, opt): """Checks if option exists in main section.""" return self.cf.has_option(self.main_section, opt) def items(self): """Returns list of (name, value) for each option in main section.""" return self.cf.items(self.main_section) # define some aliases (short-cuts / backward compatibility cruft) getbool = getboolean skytools-3.2.6/python/skytools/_pyquoting.py0000644000000000000000000001105412426435645016273 0ustar # _pyquoting.py """Various helpers for string quoting/unquoting. Here is pure Python that should match C code in _cquoting. """ import urllib, re __all__ = [ "quote_literal", "quote_copy", "quote_bytea_raw", "db_urlencode", "db_urldecode", "unescape", "unquote_literal", ] # # SQL quoting # def quote_literal(s): """Quote a literal value for SQL. 
# ---- skytools/_pyquoting.py (quoting/unquoting helpers, pure Python) ----

def quote_literal(s):
    """Quote a literal value for SQL.

    If string contains '\\', extended E'' quoting is used,
    otherwise standard quoting.  Input value of None results in
    string "null" without quotes.

    Python implementation.
    """
    if s is None:
        return "null"
    s = str(s).replace("'", "''")
    s2 = s.replace("\\", "\\\\")
    if len(s) != len(s2):
        # backslashes present, need extended quoting
        return "E'" + s2 + "'"
    return "'" + s2 + "'"

def quote_copy(s):
    """Quoting for copy command.  None is converted to \\N.

    Python implementation.
    """
    if s is None:
        return "\\N"
    s = str(s)
    # backslash must be escaped first
    for plain, escaped in (("\\", "\\\\"), ("\t", "\\t"), ("\n", "\\n"), ("\r", "\\r")):
        s = s.replace(plain, escaped)
    return s

_bytea_map = None

def quote_bytea_raw(s):
    """Quoting for bytea parser.  Returns None as None.

    Python implementation.
    """
    global _bytea_map
    if s is None:
        return None
    if _bytea_map is None:
        # build the 256-entry translation table lazily, once
        table = {}
        for i in range(256):
            c = chr(i)
            if i < 0x20 or i >= 0x7F:
                table[c] = "\\%03o" % i
            elif c == "\\":
                table[c] = r"\\"
            else:
                table[c] = c
        _bytea_map = table
    return "".join([_bytea_map[c] for c in s])

#
# Database specific urlencode and urldecode.
#

def db_urlencode(dict):
    """Database specific urlencode.

    Encode None as key without '='.  That means that in "foo&bar=",
    foo is NULL and bar is empty string.

    Python implementation.
    """
    parts = []
    for k, v in dict.items():
        if v is None:
            parts.append(urllib.quote_plus(str(k)))
        else:
            parts.append(urllib.quote_plus(str(k)) + '=' + urllib.quote_plus(str(v)))
    return '&'.join(parts)

def db_urldecode(qs):
    """Database specific urldecode.

    Decode key without '=' as None.
    This also does not support one key several times.

    Python implementation.
    """
    res = {}
    for elem in qs.split('&'):
        if not elem:
            continue
        pair = elem.split('=', 1)
        # intern() the key - the same column names repeat a lot
        name = intern(str(urllib.unquote_plus(pair[0])))
        res[name] = urllib.unquote_plus(pair[1]) if len(pair) > 1 else None
    return res

#
# Remove C-like backslash escapes
#

_esc_re = r"\\([0-7]{1,3}|.)"
_esc_rc = re.compile(_esc_re)
_esc_map = {
    't': '\t',
    'n': '\n',
    'r': '\r',
    'a': '\a',
    'b': '\b',
    "'": "'",
    '"': '"',
    '\\': '\\',
}

def _sub_unescape_c(m):
    """Unescape single escape seq."""
    v = m.group(1)
    if len(v) == 1 and not ('0' <= v <= '7'):
        # unknown escapes pass through unchanged
        return _esc_map.get(v, v)
    return chr(int(v, 8))

def unescape(val):
    """Removes C-style escapes from string.

    Python implementation.
    """
    return _esc_rc.sub(_sub_unescape_c, val)

_esql_re = r"''|\\([0-7]{1,3}|.)"
_esql_rc = re.compile(_esql_re)

def _sub_unescape_sqlext(m):
    """Unescape extended-quoted string."""
    if m.group() == "''":
        return "'"
    v = m.group(1)
    if len(v) == 1 and not ('0' <= v <= '7'):
        return _esc_map.get(v, v)
    return chr(int(v, 8))
def unquote_literal(val, stdstr = False):
    """Unquotes SQL string.

    E'..' -> extended quoting.
    '..' -> standard or extended quoting
    null -> None
    other -> returned as-is
    """
    if val[0] == "'" and val[-1] == "'":
        if stdstr:
            return val[1:-1].replace("''", "'")
        else:
            return _esql_rc.sub(_sub_unescape_sqlext, val[1:-1])
    elif len(val) > 2 and val[0] in ('E', 'e') and val[1] == "'" and val[-1] == "'":
        return _esql_rc.sub(_sub_unescape_sqlext, val[2:-1])
    elif len(val) >= 2 and val[0] == '$' and val[-1] == '$':
        # dollar-quoted: tags at both ends must match exactly
        p1 = val.find('$', 1)
        p2 = val.rfind('$', 1, -1)
        if p1 > 0 and p2 > p1:
            t1 = val[:p1+1]
            t2 = val[p2:]
            if t1 == t2:
                return val[len(t1):-len(t1)]
        raise Exception("Bad dollar-quoted string")
    elif val.lower() == "null":
        return None
    return val

# ---- skytools/plpy_applyrow.py ----
""" PLPY helper module for applying row events from pgq.logutriga().
"""

# plpy exists only inside a PL/Python function; pkgloader/skytools ship
# with the package.  Guarded so the module can also be imported outside
# the database (same convention as the plpy guard in sqltools.py).
try:
    import plpy
    import pkgloader
    pkgloader.require('skytools', '3.0')
    import skytools
except ImportError:
    pass

## TODO: automatic fkey detection

# find FK columns
FK_SQL = """
SELECT (SELECT array_agg( (SELECT attname::text FROM pg_attribute
                            WHERE attrelid = conrelid AND attnum = conkey[i]))
          FROM generate_series(1, array_upper(conkey, 1)) i) AS kcols,
       (SELECT array_agg( (SELECT attname::text FROM pg_attribute
                            WHERE attrelid = confrelid AND attnum = confkey[i]))
          FROM generate_series(1, array_upper(confkey, 1)) i) AS fcols,
       confrelid::regclass::text AS ftable
  FROM pg_constraint
 WHERE conrelid = {tbl}::regclass AND contype='f'
"""

class DataError(Exception):
    "Invalid data"

def colfilter_full(rnew, rold):
    """Column filter that applies all new columns unchanged."""
    return rnew

def colfilter_changed(rnew, rold):
    """Column filter that keeps only the columns whose value changed.

    BUG FIX: the original iterated ``for k, v in rnew:`` which raises
    TypeError on a dict (plain iteration yields keys, not pairs).
    Iterate over items() instead.

    Raises KeyError if a new column is missing from the old row
    (unchanged from original comparison semantics).
    """
    res = {}
    for k, v in rnew.items():
        if v != rold[k]:
            res[k] = v
    return res

def canapply_dummy(rnew, rold):
    """Conflict check that always allows applying the row."""
    return True

def canapply_tstamp_helper(rnew, rold, tscol):
    """Allow apply only when the new timestamp column sorts after the old one.

    Raises DataError if either value does not look like a timestamp
    (must start with a digit).
    """
    tnew = rnew[tscol]
    told = rold[tscol]
    if not tnew[0].isdigit():
        raise DataError('invalid timestamp')
    if not told[0].isdigit():
        raise DataError('invalid timestamp')
    return tnew > told
def applyrow(tblname, ev_type, new_row,
             backup_row = None,
             alt_pkey_cols = None,
             fkey_cols = None,
             fkey_ref_table = None,
             fkey_ref_cols = None,
             fn_canapply = canapply_dummy,
             fn_colfilter = colfilter_full):
    """Core logic.  Actual decisions will be done in callback functions.

    - [IUD]: If row referenced by fkey does not exist, event is not applied
    - If pkey does not exist but alt_pkey does, row is not applied.

    @param tblname: table name, schema-qualified
    @param ev_type: [IUD]:pkey1,pkey2
    @param alt_pkey_cols: list of alternatice columns to consuder
    @param fkey_cols: columns in this table that refer to other table
    @param fkey_ref_table: other table referenced here
    @param fkey_ref_cols: column in other table that must match
    @param fn_canapply: callback function, gets new and old row, returns whether the row should be applied
    @param fn_colfilter: callback function, gets new and old row, returns dict of final columns to be applied
    """
    gd = None

    # parse ev_type: "<I|U|D>:pk1,pk2"
    parts = ev_type.split(':', 1)
    if len(parts) != 2 or parts[0] not in ('I', 'U', 'D'):
        raise DataError('Unsupported ev_type: ' + repr(ev_type))
    if not parts[1]:
        raise DataError('No pkey in event')

    cmd = parts[0]
    pkey_cols = parts[1].split(',')
    qtblname = skytools.quote_fqident(tblname)

    # parse ev_data
    fields = skytools.db_urldecode(new_row)

    # reject input that could break the "{col}" substitution used below
    if ev_type.find('}') >= 0:
        raise DataError('Really suspicious activity')
    if ",".join(fields.keys()).find('}') >= 0:
        raise DataError('Really suspicious activity 2')

    # generate pkey expressions
    exprs = ["%s = {%s}" % (skytools.quote_ident(k), k) for k in pkey_cols]
    pkey_expr = " and ".join(exprs)
    alt_pkey_expr = None
    if alt_pkey_cols:
        exprs = ["%s = {%s}" % (skytools.quote_ident(k), k) for k in alt_pkey_cols]
        alt_pkey_expr = " and ".join(exprs)

    log = "data ok"

    #
    # Row data seems fine, now apply it
    #

    if fkey_ref_table:
        # check that the parent row referenced via fkey exists
        exprs = []
        for k, rk in zip(fkey_cols, fkey_ref_cols):
            exprs.append("%s = {%s}" % (skytools.quote_ident(rk), k))
        fkey_expr = " and ".join(exprs)
        q = "select 1 from only %s where %s" % (
                skytools.quote_fqident(fkey_ref_table), fkey_expr)
        res = skytools.plpy_exec(gd, q, fields)
        if not res:
            return "IGN: parent row does not exist"
        log += ", fkey ok"

    # fetch old row
    if alt_pkey_expr:
        q = "select * from only %s where %s for update" % (qtblname, alt_pkey_expr)
        res = skytools.plpy_exec(gd, q, fields)
        if res:
            oldrow = res[0]
            # if altpk matches, but pk not, then delete
            need_del = 0
            for k in pkey_cols:
                # fixme: proper type cmp?
                if fields[k] != str(oldrow[k]):
                    need_del = 1
                    break
            if need_del:
                log += ", altpk del"
                q = "delete from only %s where %s" % (qtblname, alt_pkey_expr)
                skytools.plpy_exec(gd, q, fields)
                res = None
            else:
                log += ", altpk ok"
    else:
        # no altpk
        q = "select * from only %s where %s for update" % (qtblname, pkey_expr)
        res = skytools.plpy_exec(None, q, fields)

    # got old row, with same pk and altpk
    if res:
        oldrow = res[0]
        log += ", old row"
        ok = fn_canapply(fields, oldrow)
        if ok:
            log += ", new row better"
        if not ok:
            # ignore the update
            return "IGN:" + log + ", current row more up-to-date"
    else:
        log += ", no old row"
        oldrow = None

    # switch between insert/update depending on whether old row existed
    if res:
        if cmd == 'I':
            cmd = 'U'
    else:
        if cmd == 'U':
            cmd = 'I'

    # allow column changes
    if oldrow:
        fields2 = fn_colfilter(fields, oldrow)
        for k in pkey_cols:
            if k not in fields2:
                fields2[k] = fields[k]
        fields = fields2

    # apply change
    if cmd == 'I':
        q = skytools.mk_insert_sql(fields, tblname, pkey_cols)
    elif cmd == 'U':
        q = skytools.mk_update_sql(fields, tblname, pkey_cols)
    elif cmd == 'D':
        q = skytools.mk_delete_sql(fields, tblname, pkey_cols)
    else:
        plpy.error('Huh')
    plpy.execute(q)
    return log

def ts_conflict_handler(gd, args):
    """Conflict handling based on timestamp column."""
    conf = skytools.db_urldecode(args[0])
    timefield = conf['timefield']
    ev_type = args[1]
    ev_data = args[2]
    ev_extra1 = args[3]
    ev_extra2 = args[4]
    ev_extra3 = args[5]
    ev_extra4 = args[6]
    altpk = None
    if 'altpk' in conf:
        altpk = conf['altpk'].split(',')

    def ts_canapply(rnew, rold):
        return canapply_tstamp_helper(rnew, rold, timefield)

    return applyrow(ev_extra1, ev_type, ev_data,
                    backup_row = ev_extra2,
                    alt_pkey_cols = altpk,
                    fkey_ref_table = conf.get('fkey_ref_table'),
                    fkey_ref_cols = conf.get('fkey_ref_cols'),
                    fkey_cols = conf.get('fkey_cols'),
                    fn_canapply = ts_canapply)
# ---- skytools/psycopgwrapper.py ----
"""Wrapper around psycopg2.

Database connection provides regular DB-API 2.0 interface
(.cursor()/.commit()/.rollback()/.close(), cursor .execute()/.fetchone()/
.fetchall()), with rows accessible both by index and by column name.

Deprecated interface: .dictfetchall/.dictfetchone functions on cursor.
Plain .fetchall() / .fetchone() give exact same result.
"""

__all__ = ['connect_database', 'DBError', 'I_AUTOCOMMIT',
           'I_READ_COMMITTED', 'I_REPEATABLE_READ', 'I_SERIALIZABLE']

import sys
import socket

import psycopg2.extensions
import psycopg2.extras

import skytools

from psycopg2 import Error as DBError
from skytools.sockutil import set_tcp_keepalive

# re-export psycopg2 isolation levels under short names
I_AUTOCOMMIT = psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT
I_READ_COMMITTED = psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED
I_REPEATABLE_READ = psycopg2.extensions.ISOLATION_LEVEL_REPEATABLE_READ
I_SERIALIZABLE = psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE

class _CompatRow(psycopg2.extras.DictRow):
    """Make DictRow more dict-like."""
    __slots__ = ('_index',)

    def __contains__(self, k):
        """Returns if such row has such column."""
        return k in self._index

    def copy(self):
        """Return regular dict."""
        return skytools.dbdict(self.iteritems())

    def iterkeys(self):
        return self._index.iterkeys()

    def itervalues(self):
        return list.__iter__(self)

    # obj.foo access
    def __getattr__(self, k):
        return self[k]

class _CompatCursor(psycopg2.extras.DictCursor):
    """Regular psycopg2 DictCursor with dict* methods."""
    def __init__(self, *args, **kwargs):
        psycopg2.extras.DictCursor.__init__(self, *args, **kwargs)
        self.row_factory = _CompatRow

    # deprecated aliases - plain fetch* return the same dict-like rows
    dictfetchone = psycopg2.extras.DictCursor.fetchone
    dictfetchall = psycopg2.extras.DictCursor.fetchall
    dictfetchmany = psycopg2.extras.DictCursor.fetchmany

class _CompatConnection(psycopg2.extensions.connection):
    """Connection object that uses _CompatCursor."""
    my_name = '?'

    def cursor(self, name = None):
        if name:
            return psycopg2.extensions.connection.cursor(self,
                    cursor_factory = _CompatCursor, name = name)
        return psycopg2.extensions.connection.cursor(self,
                cursor_factory = _CompatCursor)
def connect_database(connstr, keepalive = True,
                     tcp_keepidle = 4 * 60,     # 7200
                     tcp_keepcnt = 4,           # 9
                     tcp_keepintvl = 15):       # 75
    """Create a db connection with connect_timeout and TCP keepalive.

    Default connect_timeout is 15, to change put it directly into dsn.

    The extra tcp_* options are Linux-specific, see `man 7 tcp` for details.
    """
    # allow override
    if connstr.find("connect_timeout") < 0:
        connstr += " connect_timeout=15"

    # create connection
    db = _CompatConnection(connstr)
    curs = db.cursor()

    # tune keepalive on the underlying socket
    fd = hasattr(db, 'fileno') and db.fileno() or curs.fileno()
    set_tcp_keepalive(fd, keepalive, tcp_keepidle, tcp_keepcnt, tcp_keepintvl)

    # fill .server_version on older psycopg
    if not hasattr(db, 'server_version'):
        iso = db.isolation_level
        db.set_isolation_level(0)
        curs.execute('show server_version_num')
        db.server_version = int(curs.fetchone()[0])
        db.set_isolation_level(iso)

    return db

# ---- skytools/sqltools.py ----
"""Database tools."""

import os
from cStringIO import StringIO
import skytools

try:
    import plpy
except ImportError:
    pass

__all__ = [
    "fq_name_parts", "fq_name", "get_table_oid",
    "get_table_pkeys", "get_table_columns",
    "exists_schema", "exists_table", "exists_type", "exists_sequence",
    "exists_temp_table", "exists_view", "exists_function", "exists_language",
    "Snapshot", "magic_insert",
    "CopyPipe", "full_copy",
    "DBObject", "DBSchema", "DBTable", "DBFunction", "DBLanguage",
    "db_install", "installer_find_file", "installer_apply_file",
    "dbdict", "mk_insert_sql", "mk_update_sql", "mk_delete_sql",
]

class dbdict(dict):
    """Wrapper on actual dict that allows accessing dict keys as attributes."""

    # obj.foo access
    def __getattr__(self, k):
        "Return attribute."
        try:
            return self[k]
        except KeyError:
            raise AttributeError(k)

    def __setattr__(self, k, v):
        "Set attribute."
        self[k] = v

    def __delattr__(self, k):
        "Remove attribute."
        del self[k]

    def merge(self, other):
        """Add missing keys from other dict; existing keys win."""
        for key in other:
            if key not in self:
                self[key] = other[key]
#
# Fully qualified table name
#

def fq_name_parts(tbl):
    """Return fully qualified name parts.

    >>> fq_name_parts('tbl')
    ['public', 'tbl']
    >>> fq_name_parts('foo.tbl')
    ['foo', 'tbl']
    >>> fq_name_parts('foo.tbl.baz')
    ['foo', 'tbl.baz']
    """
    parts = tbl.split('.', 1)
    if len(parts) == 1:
        # unqualified name defaults to public schema
        return ['public', tbl]
    if len(parts) == 2:
        return parts
    raise Exception('Syntax error in table name:' + tbl)

def fq_name(tbl):
    """Return fully qualified name.

    >>> fq_name('tbl')
    'public.tbl'
    >>> fq_name('foo.tbl')
    'foo.tbl'
    >>> fq_name('foo.tbl.baz')
    'foo.tbl.baz'
    """
    return '.'.join(fq_name_parts(tbl))

#
# info about table
#

def get_table_oid(curs, table_name):
    """Find Postgres OID for table."""
    schema, name = fq_name_parts(table_name)
    q = """select c.oid from pg_namespace n, pg_class c
            where c.relnamespace = n.oid
              and n.nspname = %s and c.relname = %s"""
    curs.execute(q, [schema, name])
    res = curs.fetchall()
    if len(res) == 0:
        raise Exception('Table not found: ' + table_name)
    return res[0][0]

def get_table_pkeys(curs, tbl):
    """Return list of pkey column names."""
    oid = get_table_oid(curs, tbl)
    q = "SELECT k.attname FROM pg_index i, pg_attribute k"\
        " WHERE i.indrelid = %s AND k.attrelid = i.indexrelid"\
        " AND i.indisprimary AND k.attnum > 0 AND NOT k.attisdropped"\
        " ORDER BY k.attnum"
    curs.execute(q, [oid])
    return [row[0] for row in curs.fetchall()]

def get_table_columns(curs, tbl):
    """Return list of column names for table."""
    oid = get_table_oid(curs, tbl)
    q = "SELECT k.attname FROM pg_attribute k"\
        " WHERE k.attrelid = %s"\
        " AND k.attnum > 0 AND NOT k.attisdropped"\
        " ORDER BY k.attnum"
    curs.execute(q, [oid])
    return [row[0] for row in curs.fetchall()]

#
# exist checks
#

def exists_schema(curs, schema):
    """Does schema exists?"""
    q = "select count(1) from pg_namespace where nspname = %s"
    curs.execute(q, [schema])
    return curs.fetchone()[0]
def exists_sequence(curs, seq_name):
    """Does sequence exists?"""
    schema, name = fq_name_parts(seq_name)
    q = """select count(1) from pg_namespace n, pg_class c
            where c.relnamespace = n.oid and c.relkind = 'S'
              and n.nspname = %s and c.relname = %s"""
    curs.execute(q, [schema, name])
    return curs.fetchone()[0]

def exists_view(curs, view_name):
    """Does view exists?"""
    schema, name = fq_name_parts(view_name)
    q = """select count(1) from pg_namespace n, pg_class c
            where c.relnamespace = n.oid and c.relkind = 'v'
              and n.nspname = %s and c.relname = %s"""
    curs.execute(q, [schema, name])
    return curs.fetchone()[0]

def exists_type(curs, type_name):
    """Does type exists?"""
    schema, name = fq_name_parts(type_name)
    q = """select count(1) from pg_namespace n, pg_type t
            where t.typnamespace = n.oid
              and n.nspname = %s and t.typname = %s"""
    curs.execute(q, [schema, name])
    return curs.fetchone()[0]
def exists_function(curs, function_name, nargs):
    """Does function exists?

    NB: this does not check arg types, so may match several functions.
    """
    schema, name = fq_name_parts(function_name)
    q = """select count(1) from pg_namespace n, pg_proc p
            where p.pronamespace = n.oid and p.pronargs = %s
              and n.nspname = %s and p.proname = %s"""
    curs.execute(q, [nargs, schema, name])
    res = curs.fetchone()

    # if unqualified function, check builtin functions too
    if not res[0] and function_name.find('.') < 0:
        name = "pg_catalog." + function_name
        return exists_function(curs, name, nargs)
    return res[0]

def exists_language(curs, lang_name):
    """Does PL exists?"""
    q = """select count(1) from pg_language where lanname = %s"""
    curs.execute(q, [lang_name])
    return curs.fetchone()[0]

def exists_temp_table(curs, tbl):
    """Does temp table exists?"""
    # correct way, works only on 8.2
    q = "select 1 from pg_class where relname = %s and relnamespace = pg_my_temp_schema()"
    curs.execute(q, [tbl])
    return len(curs.fetchall()) > 0

#
# Support for PostgreSQL snapshot
#

class Snapshot(object):
    """Represents a PostgreSQL snapshot.

    Example:
    >>> sn = Snapshot('11:20:11,12,15')
    >>> sn.contains(9)
    True
    >>> sn.contains(11)
    False
    >>> sn.contains(17)
    True
    >>> sn.contains(20)
    False
    """

    def __init__(self, str):
        "Create snapshot from string."
        self.sn_str = str
        parts = str.split(':')
        if len(parts) != 3:
            raise Exception('Unknown format for snapshot')
        self.xmin = int(parts[0])
        self.xmax = int(parts[1])
        self.txid_list = []
        if parts[2] != "":
            self.txid_list = [int(x) for x in parts[2].split(',')]

    def contains(self, txid):
        "Is txid visible in snapshot."
        txid = int(txid)
        if txid < self.xmin:
            # before snapshot - committed long ago
            return True
        if txid >= self.xmax:
            return False
        if txid in self.txid_list:
            # was in-progress when snapshot was taken
            return False
        return True

#
# Copy helpers
#

def _gen_dict_copy(tbl, row, fields, qfields):
    """Render one dict row as a COPY text line."""
    return "\t".join([skytools.quote_copy(row.get(f)) for f in fields])

def _gen_dict_insert(tbl, row, fields, qfields):
    """Render one dict row as an INSERT statement."""
    vals = [skytools.quote_literal(row.get(f)) for f in fields]
    fmt = "insert into %s (%s) values (%s);"
    return fmt % (tbl, ",".join(qfields), ",".join(vals))

def _gen_list_copy(tbl, row, fields, qfields):
    """Render one list row as a COPY text line; short rows get NULL tail."""
    vals = []
    for i in range(len(fields)):
        try:
            v = row[i]
        except IndexError:
            v = None
        vals.append(skytools.quote_copy(v))
    return "\t".join(vals)

def _gen_list_insert(tbl, row, fields, qfields):
    """Render one list row as an INSERT statement; short rows get NULL tail."""
    vals = []
    for i in range(len(fields)):
        try:
            v = row[i]
        except IndexError:
            v = None
        vals.append(skytools.quote_literal(v))
    fmt = "insert into %s (%s) values (%s);"
    return fmt % (tbl, ",".join(qfields), ",".join(vals))
def magic_insert(curs, tablename, data, fields = None, use_insert = 0, quoted_table = False):
    r"""Copy/insert a list of dict/list data to database.

    If curs == None, then the copy or insert statements are returned
    as string.  For list of dict the field list is optional, as its
    possible to guess them from dict keys.

    Example:
    >>> magic_insert(None, 'tbl', [[1, '1'], [2, '2']], ['col1', 'col2'])
    'COPY public.tbl (col1,col2) FROM STDIN;\n1\t1\n2\t2\n\\.\n'
    """
    if len(data) == 0:
        return

    # decide how to process
    if hasattr(data[0], 'keys'):
        if fields is None:
            fields = data[0].keys()
        row_func = _gen_dict_insert if use_insert else _gen_dict_copy
    else:
        if fields is None:
            raise Exception("Non-dict data needs field list")
        row_func = _gen_list_insert if use_insert else _gen_list_copy

    qfields = [skytools.quote_ident(f) for f in fields]
    qtablename = tablename if quoted_table else skytools.quote_fqident(tablename)

    # init processing
    buf = StringIO()
    if curs is None and use_insert == 0:
        fmt = "COPY %s (%s) FROM STDIN;\n"
        buf.write(fmt % (qtablename, ",".join(qfields)))

    # process data
    for row in data:
        buf.write(row_func(qtablename, row, fields, qfields))
        buf.write("\n")

    # if user needs only string, return it
    if curs is None:
        if use_insert == 0:
            buf.write("\\.\n")
        return buf.getvalue()

    # do the actual copy/inserts
    if use_insert:
        curs.execute(buf.getvalue())
    else:
        buf.seek(0)
        hdr = "%s (%s)" % (qtablename, ",".join(qfields))
        curs.copy_from(buf, hdr)

#
# Full COPY of table from one db to another
#

class CopyPipe(object):
    "Splits one big COPY to chunks."

    def __init__(self, dstcurs, tablename = None, limit = 512*1024, sql_from = None):
        self.tablename = tablename
        self.sql_from = sql_from
        self.dstcurs = dstcurs
        self.buf = StringIO()
        self.limit = limit
        # hook for new data, hook func should return new data
        #   def write_hook(obj, data): return data
        self.write_hook = None
        # hook for flush, hook func result is discarded
        #   def flush_hook(obj): return None
        self.flush_hook = None
        self.total_rows = 0
        self.total_bytes = 0

    def write(self, data):
        "New data from psycopg"
        if self.write_hook:
            data = self.write_hook(self, data)
        self.total_bytes += len(data)
        self.total_rows += data.count("\n")

        if self.buf.tell() >= self.limit:
            # buffer full - flush everything up to a line boundary
            pos = data.find('\n')
            if pos >= 0:
                head = data[:pos + 1]
                tail = data[pos + 1:]
                self.buf.write(head)
                self.flush()
                data = tail
        self.buf.write(data)

    def flush(self):
        "Send data out."
        if self.flush_hook:
            self.flush_hook(self)
        if self.buf.tell() <= 0:
            return
        self.buf.seek(0)
        if self.sql_from:
            self.dstcurs.copy_expert(self.sql_from, self.buf)
        else:
            self.dstcurs.copy_from(self.buf, self.tablename)
        self.buf.seek(0)
        self.buf.truncate()
def full_copy(tablename, src_curs, dst_curs, column_list = None, condition = None,
              dst_tablename = None, dst_column_list = None,
              write_hook = None, flush_hook = None):
    """COPY table from one db to another.

    @param tablename: source table, schema-qualified
    @param src_curs, dst_curs: cursors on source and destination dbs
    @param column_list: columns to copy (empty/None means all)
    @param condition: optional WHERE condition (needs copy_expert support)
    @param dst_tablename, dst_column_list: default to source values
    @param write_hook, flush_hook: optional CopyPipe hooks
    @return: (total_bytes, total_rows) tuple

    FIX: mutable default argument ``column_list = []`` replaced with a
    None sentinel (behavior-compatible: the old default was only read).
    """
    if column_list is None:
        column_list = []

    # default dst table and dst columns to source ones
    dst_tablename = dst_tablename or tablename
    dst_column_list = dst_column_list or column_list[:]
    if len(dst_column_list) != len(column_list):
        raise Exception('src and dst column lists must match in length')

    def build_qfields(cols):
        """Quoted column list, or '*' when no columns given."""
        if cols:
            return ",".join([skytools.quote_ident(f) for f in cols])
        return "*"

    def build_statement(table, cols):
        """Quoted 'table (cols)' target for COPY."""
        qtable = skytools.quote_fqident(table)
        if cols:
            return "%s (%s)" % (qtable, build_qfields(cols))
        return qtable

    dst = build_statement(dst_tablename, dst_column_list)
    if condition:
        src = "(SELECT %s FROM %s WHERE %s)" % (build_qfields(column_list),
                skytools.quote_fqident(tablename), condition)
    else:
        src = build_statement(tablename, column_list)

    if hasattr(src_curs, 'copy_expert'):
        sql_to = "COPY %s TO stdout" % src
        sql_from = "COPY %s FROM stdin" % dst
        buf = CopyPipe(dst_curs, sql_from = sql_from)
        buf.write_hook = write_hook
        buf.flush_hook = flush_hook
        src_curs.copy_expert(sql_to, buf)
    else:
        if condition:
            # regular psycopg copy_to generates invalid sql for subselect copy
            raise Exception('copy_expert() is needed for conditional copy')
        buf = CopyPipe(dst_curs, dst)
        buf.write_hook = write_hook
        buf.flush_hook = flush_hook
        src_curs.copy_to(buf, src)
    buf.flush()

    return (buf.total_bytes, buf.total_rows)

#
# SQL installer
#

class DBObject(object):
    """Base class for installable DB objects."""
    name = None
    sql = None
    sql_file = None

    def __init__(self, name, sql = None, sql_file = None):
        """Generic dbobject init."""
        self.name = name
        self.sql = sql
        self.sql_file = sql_file

    def create(self, curs, log = None):
        """Create a dbobject."""
        if log:
            log.info('Installing %s' % self.name)
        if self.sql:
            sql = self.sql
        elif self.sql_file:
            fn = self.find_file()
            if log:
                log.info(" Reading from %s" % fn)
            # FIX: close file handle after reading (was open(fn).read())
            f = open(fn, "r")
            try:
                sql = f.read()
            finally:
                f.close()
        else:
            raise Exception('object not defined')
        for stmt in skytools.parse_statements(sql):
            #if log: log.debug(repr(stmt))
            curs.execute(stmt)

    def find_file(self):
        """Find install script file."""
        return installer_find_file(self.sql_file)

class DBSchema(DBObject):
    """Handles db schema."""
    def exists(self, curs):
        """Does schema exists."""
        return exists_schema(curs, self.name)

class DBTable(DBObject):
    """Handles db table."""
    def exists(self, curs):
        """Does table exists."""
        return exists_table(curs, self.name)

class DBFunction(DBObject):
    """Handles db function."""
    def __init__(self, name, nargs, sql = None, sql_file = None):
        """Function object - number of args is significant."""
        DBObject.__init__(self, name, sql, sql_file)
        self.nargs = nargs

    def exists(self, curs):
        """Does function exists."""
        return exists_function(curs, self.name, self.nargs)
class DBLanguage(DBObject):
    """Handles db language."""
    def __init__(self, name):
        """PL object - creation happens with CREATE LANGUAGE."""
        DBObject.__init__(self, name, sql = "create language %s" % name)

    def exists(self, curs):
        """Does PL exists."""
        return exists_language(curs, self.name)

def db_install(curs, list, log = None):
    """Installs list of objects into db.

    NOTE(review): parameter name ``list`` shadows the builtin but is kept
    for backward compatibility with keyword callers.
    """
    for obj in list:
        if not obj.exists(curs):
            obj.create(curs, log)
        else:
            if log:
                log.info('%s is installed' % obj.name)

def installer_find_file(filename):
    """Find SQL script from pre-defined paths.

    Absolute paths are used as-is; relative names are searched in the
    configured sql_locations.  Raises Exception when not found.
    """
    full_fn = None
    if filename[0] == "/":
        if os.path.isfile(filename):
            full_fn = filename
    else:
        import skytools.installer_config
        dir_list = skytools.installer_config.sql_locations
        for fdir in dir_list:
            fn = os.path.join(fdir, filename)
            if os.path.isfile(fn):
                full_fn = fn
                break
    if not full_fn:
        raise Exception('File not found: ' + filename)
    return full_fn

def installer_apply_file(db, filename, log):
    """Find SQL file and apply it to db, statement-by-statement."""
    fn = installer_find_file(filename)
    # FIX: close file handle after reading (was open(fn).read())
    f = open(fn, "r")
    try:
        sql = f.read()
    finally:
        f.close()
    if log:
        log.info("applying %s" % fn)
    curs = db.cursor()
    for stmt in skytools.parse_statements(sql):
        #log.debug(repr(stmt))
        curs.execute(stmt)
#
# Generate INSERT/UPDATE/DELETE statement
#

def mk_insert_sql(row, tbl, pkey_list = None, field_map = None):
    """Generate INSERT statement from dict data.

    >>> mk_insert_sql({'id': '1', 'data': None}, 'tbl')
    "insert into public.tbl (data, id) values (null, '1');"
    """
    col_list = []
    val_list = []
    if field_map:
        for src, dst in field_map.iteritems():
            col_list.append(skytools.quote_ident(dst))
            val_list.append(skytools.quote_literal(row[src]))
    else:
        for c, v in row.iteritems():
            col_list.append(skytools.quote_ident(c))
            val_list.append(skytools.quote_literal(v))
    return "insert into %s (%s) values (%s);" % (
            skytools.quote_fqident(tbl), ", ".join(col_list), ", ".join(val_list))

def mk_update_sql(row, tbl, pkey_list, field_map = None):
    r"""Generate UPDATE statement from dict data.

    >>> mk_update_sql({'id': 0, 'id2': '2', 'data': 'str\\'}, 'Table', ['id', 'id2'])
    'update only public."Table" set data = E\'str\\\\\' where id = \'0\' and id2 = \'2\';'
    """
    if len(pkey_list) < 1:
        raise Exception("update needs pkeys")
    set_list = []
    whe_list = []
    pkmap = {}
    # pkeys go into WHERE clause
    for k in pkey_list:
        pkmap[k] = 1
        new_k = field_map and field_map[k] or k
        col = skytools.quote_ident(new_k)
        val = skytools.quote_literal(row[k])
        whe_list.append("%s = %s" % (col, val))
    # rest go into SET clause
    if field_map:
        for src, dst in field_map.iteritems():
            if src not in pkmap:
                col = skytools.quote_ident(dst)
                val = skytools.quote_literal(row[src])
                set_list.append("%s = %s" % (col, val))
    else:
        for col, val in row.iteritems():
            if col not in pkmap:
                col = skytools.quote_ident(col)
                val = skytools.quote_literal(val)
                set_list.append("%s = %s" % (col, val))
    return "update only %s set %s where %s;" % (skytools.quote_fqident(tbl),
            ", ".join(set_list), " and ".join(whe_list))

def mk_delete_sql(row, tbl, pkey_list, field_map = None):
    """Generate DELETE statement from dict data."""
    if len(pkey_list) < 1:
        raise Exception("delete needs pkeys")
    whe_list = []
    for k in pkey_list:
        new_k = field_map and field_map[k] or k
        col = skytools.quote_ident(new_k)
        val = skytools.quote_literal(row[k])
        whe_list.append("%s = %s" % (col, val))
    whe_str = " and ".join(whe_list)
    return "delete from only %s where %s;" % (skytools.quote_fqident(tbl), whe_str)

if __name__ == '__main__':
    import doctest
    doctest.testmod()

# ---- skytools/gzlog.py ----
"""Atomic append of gzipped data.

The point is - if several gzip streams are concatenated,
they are read back as one whole stream.
"""

import gzip
from cStringIO import StringIO

__all__ = ['gzip_append']

#
# gzip storage
#

def gzip_append(filename, data, level = 6):
    """Append a block of data to file with safety checks."""
    # compress data
    buf = StringIO()
    g = gzip.GzipFile(fileobj = buf, compresslevel = level, mode = "w")
    g.write(data)
    g.close()
    zdata = buf.getvalue()

    # append, safely
    f = open(filename, "a+", 0)
    f.seek(0, 2)
    pos = f.tell()
    try:
        f.write(zdata)
        f.close()
    except Exception as ex:
        # rollback on error: truncate back to the old end position
        f.seek(pos, 0)
        f.truncate()
        f.close()
        raise ex

# ---- skytools/fileutil.py ----
"""File utilities

>>> import tempfile, os
>>> pidfn = tempfile.mktemp('.pid')
>>> write_atomic(pidfn, "1")
>>> write_atomic(pidfn, "2")
>>> os.remove(pidfn)
>>> write_atomic(pidfn, "1", '.bak')
>>> write_atomic(pidfn, "2", '.bak')
>>> os.remove(pidfn)
"""

import sys
import os
import errno

__all__ = ['write_atomic', 'signal_pidfile']
# non-win32
def write_atomic(fn, data, bakext=None, mode='b'):
    """Write file with rename.

    New data goes to fn+'.new', then rename() replaces the target
    atomically.  Optional bakext hardlinks the old content aside first.
    """
    if mode not in ['', 'b', 't']:
        raise ValueError("unsupported fopen mode")

    # write new data to tmp file
    fn2 = fn + '.new'
    f = open(fn2, 'w' + mode)
    f.write(data)
    f.close()

    # link old data to bak file
    if bakext:
        if bakext.find('/') >= 0:
            raise ValueError("invalid bakext")
        fnb = fn + bakext
        try:
            os.unlink(fnb)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        try:
            os.link(fn, fnb)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

    # win32 does not like replace
    if sys.platform == 'win32':
        try:
            os.remove(fn)
        except:
            pass

    # atomically replace file
    os.rename(fn2, fn)

def signal_pidfile(pidfile, sig):
    """Send a signal to process whose ID is located in pidfile.

    Read only first line of pidfile to support multiline
    pidfiles like postmaster.pid.

    Returns True is successful, False if pidfile does not exist
    or process itself is dead.  Any other errors will passed
    as exceptions.
    """
    ln = ''
    try:
        f = open(pidfile, 'r')
        ln = f.readline().strip()
        f.close()
        pid = int(ln)
        if sig == 0 and sys.platform == 'win32':
            return win32_detect_pid(pid)
        os.kill(pid, sig)
        return True
    except IOError as ex:
        if ex.errno != errno.ENOENT:
            raise
    except OSError as ex:
        if ex.errno != errno.ESRCH:
            raise
    except ValueError as ex:
        # this leaves slight race when someone is just creating the file,
        # but more common case is old empty file.
        if not ln:
            return False
        raise ValueError('Corrupt pidfile: %s' % pidfile)
    return False

def win32_detect_pid(pid):
    """Process detection for win32."""
    # avoid pywin32 dependecy, use ctypes instead
    import ctypes

    # win32 constants
    PROCESS_QUERY_INFORMATION = 1024
    STILL_ACTIVE = 259
    ERROR_INVALID_PARAMETER = 87
    ERROR_ACCESS_DENIED = 5

    # Load kernel32.dll
    k = ctypes.windll.kernel32
    OpenProcess = k.OpenProcess
    OpenProcess.restype = ctypes.c_void_p

    # query pid exit code
    h = OpenProcess(PROCESS_QUERY_INFORMATION, 0, pid)
    if h is None:
        err = k.GetLastError()
        if err == ERROR_INVALID_PARAMETER:
            return False
        if err == ERROR_ACCESS_DENIED:
            return True
        raise OSError(errno.EFAULT, "Unknown win32error: " + str(err))
    code = ctypes.c_int()
    k.GetExitCodeProcess(h, ctypes.byref(code))
    k.CloseHandle(h)
    return code.value == STILL_ACTIVE

def win32_write_atomic(fn, data, bakext=None, mode='b'):
    """Write file with rename for win32.

    win32 has no atomic rename-over and no hardlinks, so the old file
    is moved (or removed) before the final rename.
    """
    if mode not in ['', 'b', 't']:
        raise ValueError("unsupported fopen mode")

    # write new data to tmp file
    fn2 = fn + '.new'
    f = open(fn2, 'w' + mode)
    f.write(data)
    f.close()

    # move old data to bak file
    if bakext:
        if bakext.find('/') >= 0:
            raise ValueError("invalid bakext")
        fnb = fn + bakext
        try:
            os.remove(fnb)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        try:
            os.rename(fn, fnb)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
    else:
        try:
            os.remove(fn)
        except:
            pass

    # replace file
    os.rename(fn2, fn)
fn + '.new' f = open(fn2, 'w' + mode) f.write(data) f.close() # move old data to bak file if bakext: if bakext.find('/') >= 0: raise ValueError("invalid bakext") fnb = fn + bakext try: os.remove(fnb) except OSError, e: if e.errno != errno.ENOENT: raise try: os.rename(fn, fnb) except OSError, e: if e.errno != errno.ENOENT: raise else: try: os.remove(fn) except: pass # replace file os.rename(fn2, fn) if sys.platform == 'win32': write_atomic = win32_write_atomic if __name__ == '__main__': import doctest doctest.testmod() skytools-3.2.6/python/skytools/dbservice.py0000755000000000000000000005723412426435645016060 0ustar #! /usr/bin/env python """ Class used to handle multiset receiving and returning PL/Python procedures """ import re, skytools from skytools import dbdict __all__ = ['DBService', 'ServiceContext', 'get_record', 'get_record_list', 'make_record', 'make_record_array', 'TableAPI', #'log_result', 'transform_fields' ] try: import plpy except ImportError: pass def transform_fields(rows, key_fields, name_field, data_field): """Convert multiple-rows per key input array to one-row, multiple-column output array. The input arrays must be sorted by the key fields. >>> rows = [] >>> rows.append({'time': '22:00', 'metric': 'count', 'value': 100}) >>> rows.append({'time': '22:00', 'metric': 'dur', 'value': 7}) >>> rows.append({'time': '23:00', 'metric': 'count', 'value': 200}) >>> rows.append({'time': '23:00', 'metric': 'dur', 'value': 5}) >>> transform_fields(rows, ['time'], 'metric', 'value') [{'count': 100, 'dur': 7, 'time': '22:00'}, {'count': 200, 'dur': 5, 'time': '23:00'}] """ cur_key = None cur_row = None res = [] for r in rows: k = [r[f] for f in key_fields] if k != cur_key: cur_key = k cur_row = {} for f in key_fields: cur_row[f] = r[f] res.append(cur_row) cur_row[r[name_field]] = r[data_field] return res # render_table def render_table(rows, fields): """ Render result rows as a table. Returns array of lines. 
""" widths = [15] * len(fields) for row in rows: for i, k in enumerate(fields): rlen = len(str(row.get(k))) widths[i] = widths[i] > rlen and widths[i] or rlen widths = [w + 2 for w in widths] fmt = '%%-%ds' * (len(widths) - 1) + '%%s' fmt = fmt % tuple(widths[:-1]) lines = [] lines.append(fmt % tuple(fields)) lines.append(fmt % tuple(['-'*15] * len(fields))) for row in rows: lines.append(fmt % tuple([str(row.get(k)) for k in fields])) return lines # data conversion to and from url def get_record(arg): """ Parse data for one urlencoded record. Useful for turning incoming serialized data into structure usable for manipulation. """ if not arg: return dbdict() # allow array of single record if arg[0] in ('{', '['): lst = skytools.parse_pgarray(arg) if len(lst) != 1: raise ValueError('get_record() expects exactly 1 row, got %d' % len(lst)) arg = lst[0] # parse record return dbdict(skytools.db_urldecode(arg)) def get_record_list(array): """ Parse array of urlencoded records. Useful for turning incoming serialized data into structure usable for manipulation. """ if array is None: return [] if isinstance(array, list): return map(get_record, array) else: return map(get_record, skytools.parse_pgarray(array)) def get_record_lists(tbl, field): """ Create dictionary of lists from given list using field as grouping criteria Used for master detail operatons to group detail records according to master id """ dict = dbdict() for rec in tbl: id = str( rec[field] ) dict.setdefault( id, [] ).append(rec) return dict def _make_record_convert(row): """Converts complex values.""" d = row.copy() for k, v in d.items(): if isinstance(v, list): d[k] = skytools.make_pgarray(v) return skytools.db_urlencode(d) def make_record(row): """ Takes record as dict and returns it as urlencoded string. 
Used to send data out of db service layer.or to fake incoming calls """ for v in row.values(): if isinstance(v, list): return _make_record_convert(row) return skytools.db_urlencode(row) def make_record_array(rowlist): """ Takes list of records got from plpy execute and turns it into postgers aray string. Used to send data out of db service layer. """ return '{' + ','.join( map(make_record, rowlist) ) + '}' def get_result_items(list, name): """ Get return values from result """ for r in list: if r['res_code'] == name: return get_record_list(r['res_rows']) return None def log_result(log, list): """ Sends dbservice execution logs to logfile """ msglist = get_result_items(list, "_status") if msglist is None: if list: log.warning('Unhandled output result: _status res_code not present.') else: for msg in msglist: log.debug( msg['_message'] ) class DBService: """ Wrap parameterized query handling and multiset stored procedure writing """ ROW = "_row" # name of the fake field where internal record id is stored FIELD = "_field" # parameter name for the field in record that is related to current message PARAM = "_param" # name of the parameter to which message relates SKIP = "skip" # used when record is needed for it's data but is not been updated INSERT = "insert" UPDATE = "update" DELETE = "delete" INFO = "info" # just informative message for the user NOTICE = "notice" # more than info less than warning WARNING = "warning" # warning message, something is out of ordinary ERROR = "error" # error found but execution continues until check then error is raised FATAL = "fatal" # execution is terminated at once and all found errors returned def __init__(self, context, global_dict = None): """ This object must be initiated in the beginning of each db service """ rec = skytools.db_urldecode(context) self._context = context # used to run dbservice in retval self.global_dict = global_dict # used for cacheing query plans self._retval = [] # used to collect return resultsets 
self._is_test = 'is_test' in rec # used to convert output into human readable form self.sqls = None # if sqls stays None then no recording of sqls is done if "show_sql" in rec: # api must add exected sql to resultset self.sqls = [] # sql's executed by dbservice, used for dubugging self.can_save = True # used to keep value most severe error found so far self.messages = [] # used to hold list of messages to be returned to the user # error and message handling def tell_user(self, severity, code, message, params = None, **kvargs): """ Adds another message to the set of messages to be sent back to user If error message then can_save is set false If fatal message then error or found errors are raised at once """ params = params or kvargs #plpy.notice("%s %s: %s %s" % (severity, code, message, str(params))) params["_severity"] = severity params["_code"] = code params["_message"] = message self.messages.append( params ) if severity == self.ERROR: self.can_save = False if severity == self.FATAL: self.can_save = False self.raise_if_errors() def raise_if_errors(self): """ To be used in places where before continuing must be chcked if errors have been found Raises found errors packing them into error message as urlencoded string """ if not self.can_save: msgs = "Dbservice error(s): " + make_record_array( self.messages ) plpy.error( msgs ) # run sql meant mostly for select but not limited to def create_query(self, sql, params = None, **kvargs): """ Returns initialized querybuilder object for building complex dynamic queries """ params = params or kvargs return skytools.PLPyQueryBuilder(sql, params, self.global_dict, self.sqls ) def run_query(self, sql, params = None, **kvargs): """ Helper function if everything you need is just paramertisized execute Sets rows_found that is coneninet to use when you don't need result just want to know how many rows were affected """ params = params or kvargs rows = skytools.plpy_exec(self.global_dict, sql, params) # convert result rows to 
dbdict if rows: rows = [dbdict(r) for r in rows] self.rows_found = len(rows) else: self.rows_found = 0 return rows def run_query_row(self, sql, params = None, **kvargs): """ Helper function if everything you need is just paramertisized execute to fetch one row only. If not found none is returned """ params = params or kvargs rows = self.run_query( sql, params ) if len(rows) == 0: return None return rows[0] def run_exists(self, sql, params = None, **kvargs): """ Helper function to find out that record in given table exists using values in dict as criteria. Takes away all the hassle of preparing statements and processing returned result giving out just one boolean """ params = params or kvargs self.run_query( sql, params ) return self.rows_found def run_lookup(self, sql, params = None, **kvargs): """ Helper function to fetch one value Takes away all the hassle of preparing statements and processing returned result giving out just one value. Uses plan cache if used inside db service """ params = params or kvargs rows = self.run_query( sql, params ) if len(rows) == 0: return None row = rows[0] return row.values()[0] # resultset handling def return_next(self, rows, res_name, severity = None): """ Adds given set of rows to resultset """ self._retval.append([res_name, rows]) if severity is not None and len(rows) == 0: self.tell_user(severity, "dbsXXXX", "No matching records found") return rows def return_next_sql(self, sql, params, res_name, severity = None): """ Exectes query and adds recors resultset """ rows = self.run_query( sql, params ) return self.return_next( rows, res_name, severity ) def retval(self, service_name = None, params = None, **kvargs): """ Return collected resultsets and append to the end messages to the users Method is called usually as last statment in dbservice to return the results Also converts results into desired format """ params = params or kvargs self.raise_if_errors() if len( self.messages ): self.return_next( self.messages, "_status" ) if 
self.sqls is not None and len( self.sqls ): self.return_next( self.sqls, "_sql" ) results = [] for r in self._retval: res_name = r[0] rows = r[1] res_count = str(len(rows)) if self._is_test and len(rows) > 0: results.append([res_name, res_count, res_name]) n = 1 for trow in render_table(rows, rows[0].keys()): results.append([res_name, n, trow]) n += 1 else: res_rows = make_record_array(rows) results.append([res_name, res_count, res_rows]) if service_name: sql = "select * from %s( {i_context}, {i_params} );" % skytools.quote_fqident(service_name) par = dbdict( i_context = self._context, i_params = make_record(params) ) res = self.run_query( sql, par ) for r in res: results.append((r.res_code, r.res_text, r.res_rows)) return results # miscellaneous def check_required(self, record_name, record, severity, *fields): """ Checks if all required fields are present in record Used to validate incoming data Returns list of field names that are missing or empty """ missing = [] params = {self.PARAM: record_name} if self.ROW in record: params[self.ROW] = record[self.ROW] for field in fields: params[self.FIELD] = field if field in record: if record[field] is None or (isinstance(record[field], basestring) and len(record[field]) == 0): self.tell_user(severity, "dbsXXXX", "Required value missing: {%s}.{%s}" % (self.PARAM, self.FIELD), **params) missing.append(field) else: self.tell_user(severity, "dbsXXXX", "Required field missing: {%s}.{%s}" % (self.PARAM, self.FIELD), **params) missing.append(field) return missing # TableAPI class TableAPI: """ Class for managing one record updates using primary key """ _table = None # schema name and table name _where = None # where condition used for update and delete _id = None # name of the primary key filed _id_type = None # column type of primary key _op = None # operation currently carried out _ctx = None # context object for username and version _logging = True # should tapi log data changed _row = None # row identifer from calling 
program def __init__(self, ctx, table, create_log = True, id_type='int8' ): """ Table name is used to construct insert update and delete statements Table must have primary key field whose name is in format id_ Tablename should be in format schema.tablename """ self._ctx = ctx self._table = skytools.quote_fqident(table) self._id = "id_" + skytools.fq_name_parts(table)[1] self._id_type = id_type self._where = '%s = {%s:%s}' % (skytools.quote_ident(self._id), self._id, self._id_type) self._logging = create_log def _log(self, result, original = None): """ Log changei into table log.changelog """ if not self._logging: return changes = [] for key in result.keys(): if self._op == 'update': if key in original: if str(original[key]) <> str(result[key]): changes.append( key + ": " + str(original[key]) + " -> " + str(result[key]) ) else: changes.append( key + ": " + str(result[key]) ) self._ctx.log( self._table, result[ self._id ], self._op, "\n".join(changes) ) def _version_check(self, original, version): if original is None: self._ctx.tell_user( self._ctx.INFO, "dbsXXXX", "Record ({table}.{field}={id}) has been deleted by other user while you were editing. Check version ({ver}) in changelog for details.", table = self._table, field = self._id, id = original[self._id], ver = original.version, _row = self._row ) if version is not None and original.version is not None: if int(version) != int(original.version): self._ctx.tell_user( self._ctx.INFO, "dbsXXXX", "Record ({table}.{field}={id}) has been changed by other user while you were editing. Version in db: ({db_ver}) and version sent by caller ({caller_ver}). 
See changelog for details.", table = self._table, field = self._id, id = original[self._id], db_ver = original.version, caller_ver = version, _row = self._row ) def _insert(self, data): fields = [] values = [] for key in data.keys(): if data[key] is not None: # ignore empty fields.append(skytools.quote_ident(key)) values.append("{" + key + "}") sql = "insert into %s (%s) values (%s) returning *;" % ( self._table, ",".join(fields), ",".join(values)) result = self._ctx.run_query_row( sql, data ) self._log( result ) return result def _update(self, data, version): sql = "select * from %s where %s" % ( self._table, self._where ) original = self._ctx.run_query_row( sql, data ) self._version_check( original, version ) pairs = [] for key in data.keys(): if data[key] is None: pairs.append( key + " = NULL" ) else: pairs.append( key + " = {" + key + "}" ) sql = "update %s set %s where %s returning *;" % ( self._table, ", ".join(pairs), self._where ) result = self._ctx.run_query_row( sql, data ) self._log( result, original ) return result def _delete(self, data, version): sql = "delete from %s where %s returning *;" % ( self._table, self._where ) result = self._ctx.run_query_row( sql, data ) self._version_check( result, version ) self._log( result ) return result def do(self, data): """ Do dml according to special field _op that must be given together wit data """ result = data # so it is initialized for skip self._op = data.pop(self._ctx.OP) # determines operation done self._row = data.pop(self._ctx.ROW, None) # internal record id used for error reporting if self._row is None: # if no _row variable was provided self._row = data.get(self._id, None) # use id instead if self._id in data and data[self._id]: # if _id field is given if int( data[self._id] ) < 0: # and it is fake key generated by ui data.pop(self._id) # remove fake key so real one can be assigned version = data.get('version', None) # version sent from caller data['version'] = self._ctx.version # current transaction 
id is stored in each record if self._op == self._ctx.INSERT: result = self._insert( data ) elif self._op == self._ctx.UPDATE: result = self._update( data, version ) elif self._op == self._ctx.DELETE: result = self._delete( data, version ) elif self._op == self._ctx.SKIP: None else: self._ctx.tell_user( self._ctx.ERROR, "dbsXXXX", "Unahndled _op='{op}' value in TableAPI (table={table}, id={id})", op = self._op, table = self._table, id = data[self._id] ) result[self._ctx.OP] = self._op result[self._ctx.ROW] = self._row return result # ServiceContext class ServiceContext(DBService): OP = "_op" # name of the fake field where record modificaton operation is stored def __init__(self, context, global_dict = None): """ This object must be initiated in the beginning of each db service """ DBService.__init__(self, context, global_dict) rec = skytools.db_urldecode(context) if "username" not in rec: plpy.error("Username must be provided in db service context parameter") self.username = rec['username'] # used for logging purposes res = plpy.execute("select txid_current() as txid;") row = res[0] self.version = row["txid"] self.rows_found = 0 # Flag set by run query to inicate number of rows got # logging def log(self, _object_type, _key_object, _change_op, _payload): """ Log stuff into the changelog whatever seems relevant to be logged """ self.run_query( "select log.log_change( {version}, {username}, {object_type}, {key_object}, {change_op}, {payload} );", version= self.version , username= self.username , object_type= _object_type , key_object= _key_object , change_op= _change_op , payload= _payload ) # data conversion to and from url def get_record(self, arg): """ Parse data for one urlencoded record. Useful for turning incoming serialized data into structure usable for manipulation. """ return get_record(arg) def get_record_list(self, array): """ Parse array of urlencoded records. Useful for turning incoming serialized data into structure usable for manipulation. 
""" return get_record_list(array) def get_list_groups(self, tbl, field): """ Create dictionary of lists from given list using field as grouping criteria Used for master detail operatons to group detail records according to master id """ return get_record_lists(tbl, field) def make_record(self, row): """ Takes record as dict and returns it as urlencoded string. Used to send data out of db service layer.or to fake incoming calls """ return make_record(row) def make_record_array(self, rowlist): """ Takes list of records got from plpy execute and turns it into postgers aray string. Used to send data out of db service layer. """ return make_record_array(rowlist) # tapi based dml functions def _changelog(self, fields): log = True if fields: if '_log' in fields: if not fields.pop('_log'): log = False if '_log_id' in fields: fields.pop('_log_id') if '_log_field' in fields: fields.pop('_log_field') return log def tapi_do(self, tablename, row, **fields): """ Convenience function for just doing the change without creating tapi object first Fields object may contain aditional overriding values that are aplied before do """ tapi = TableAPI(self, tablename, self._changelog(fields)) row = row or dbdict() fields and row.update(fields) return tapi.do( row ) def tapi_do_set(self, tablename, rows, **fields): """ Does changes to list of detail rows Used for normal foreign keys in master detail relationships Dows first deletes then updates and then inserts to avoid uniqueness problems """ tapi = TableAPI(self, tablename, self._changelog(fields)) results, updates, inserts = [], [], [] for row in rows: fields and row.update(fields) if row[self.OP] == self.DELETE: results.append( tapi.do( row ) ) elif row[self.OP] == self.UPDATE: updates.append( row ) else: inserts.append( row ) for row in updates: results.append( tapi.do( row ) ) for row in inserts: results.append( tapi.do( row ) ) return results # resultset handling def retval_dbservice(self, service_name, ctx, **params): """ Runs 
service with standard interface. Convenient to use for calling select services from other services For example to return data after doing save """ self.raise_if_errors() service_sql = "select * from %s( {i_context}, {i_params} );" % skytools.quote_fqident(service_name) service_params = { "i_context": ctx, "i_params": self.make_record(params) } results = self.run_query( service_sql, service_params ) retval = self.retval() for r in results: retval.append((r.res_code, r.res_text, r.res_rows)) return retval # miscellaneous def field_copy(self, dict, *keys): """ Used to copy subset of fields from one record into another example: dbs.copy(record, hosting) "start_date", "key_colo", "key_rack") """ retval = dbdict() for key in keys: if key in dict: retval[key] = dict[key] return retval def field_set(self, **fields): """ Fills dict with given values and returns resulting dict If dict was not provied with call it is created """ return fields skytools-3.2.6/python/skytools/sockutil.py0000644000000000000000000000774212426435645015743 0ustar """Various low-level utility functions for sockets.""" __all__ = ['set_tcp_keepalive', 'set_nonblocking', 'set_cloexec'] import sys import os import socket try: import fcntl except ImportError: pass __all__ = ['set_tcp_keepalive', 'set_nonblocking', 'set_cloexec'] def set_tcp_keepalive(fd, keepalive = True, tcp_keepidle = 4 * 60, tcp_keepcnt = 4, tcp_keepintvl = 15): """Turn on TCP keepalive. The fd can be either numeric or socket object with 'fileno' method. OS defaults for SO_KEEPALIVE=1: - Linux: (7200, 9, 75) - can configure all. - MacOS: (7200, 8, 75) - can configure only tcp_keepidle. - Win32: (7200, 5|10, 1) - can configure tcp_keepidle and tcp_keepintvl. Our defaults: (240, 4, 15). >>> import socket >>> s = socket.socket() >>> set_tcp_keepalive(s) """ # usable on this OS? 
if not hasattr(socket, 'SO_KEEPALIVE') or not hasattr(socket, 'fromfd'): return # need socket object if isinstance(fd, socket.SocketType): s = fd else: if hasattr(fd, 'fileno'): fd = fd.fileno() s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM) # skip if unix socket if type(s.getsockname()) != type(()): return # no keepalive? if not keepalive: s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 0) return # basic keepalive s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) # detect available options TCP_KEEPCNT = getattr(socket, 'TCP_KEEPCNT', None) TCP_KEEPINTVL = getattr(socket, 'TCP_KEEPINTVL', None) TCP_KEEPIDLE = getattr(socket, 'TCP_KEEPIDLE', None) TCP_KEEPALIVE = getattr(socket, 'TCP_KEEPALIVE', None) SIO_KEEPALIVE_VALS = getattr(socket, 'SIO_KEEPALIVE_VALS', None) if TCP_KEEPIDLE is None and TCP_KEEPALIVE is None and sys.platform == 'darwin': TCP_KEEPALIVE = 0x10 # configure if TCP_KEEPCNT is not None: s.setsockopt(socket.IPPROTO_TCP, TCP_KEEPCNT, tcp_keepcnt) if TCP_KEEPINTVL is not None: s.setsockopt(socket.IPPROTO_TCP, TCP_KEEPINTVL, tcp_keepintvl) if TCP_KEEPIDLE is not None: s.setsockopt(socket.IPPROTO_TCP, TCP_KEEPIDLE, tcp_keepidle) elif TCP_KEEPALIVE is not None: s.setsockopt(socket.IPPROTO_TCP, TCP_KEEPALIVE, tcp_keepidle) elif SIO_KEEPALIVE_VALS is not None: s.ioctl(SIO_KEEPALIVE_VALS, (1, tcp_keepidle*1000, tcp_keepintvl*1000)) def set_nonblocking(fd, onoff=True): """Toggle the O_NONBLOCK flag. If onoff==None then return current setting. Actual sockets from 'socket' module should use .setblocking() method, this is for situations where it is not available. Eg. pipes from 'subprocess' module. 
>>> import socket >>> s = socket.socket() >>> set_nonblocking(s, None) False >>> set_nonblocking(s, 1) >>> set_nonblocking(s, None) True """ flags = fcntl.fcntl(fd, fcntl.F_GETFL) if onoff is None: return (flags & os.O_NONBLOCK) > 0 if onoff: flags |= os.O_NONBLOCK else: flags &= ~os.O_NONBLOCK fcntl.fcntl(fd, fcntl.F_SETFL, flags) def set_cloexec(fd, onoff=True): """Toggle the FD_CLOEXEC flag. If onoff==None then return current setting. Some libraries do it automatically (eg. libpq). Others do not (Python stdlib). >>> import os >>> f = open(os.devnull, 'rb') >>> set_cloexec(f, None) False >>> set_cloexec(f, True) >>> set_cloexec(f, None) True >>> import socket >>> s = socket.socket() >>> set_cloexec(s, None) False >>> set_cloexec(s) >>> set_cloexec(s, None) True """ flags = fcntl.fcntl(fd, fcntl.F_GETFD) if onoff is None: return (flags & fcntl.FD_CLOEXEC) > 0 if onoff: flags |= fcntl.FD_CLOEXEC else: flags &= ~fcntl.FD_CLOEXEC fcntl.fcntl(fd, fcntl.F_SETFD, flags) if __name__ == '__main__': import doctest doctest.testmod() skytools-3.2.6/python/skytools/natsort.py0000644000000000000000000000274012426435645015571 0ustar """Natural sort. Compares numeric parts numerically. """ # Based on idea at http://code.activestate.com/recipes/285264/ # Works with both Python 2.x and 3.x # Ignores leading zeroes: 001 and 01 are considered equal import re as _re _rc = _re.compile(r'\d+|\D+') __all__ = ['natsort_key', 'natsort', 'natsorted', 'natsort_key_icase', 'natsort_icase', 'natsorted_icase'] def natsort_key(s): """Split string to numeric and non-numeric fragments.""" return [ not f[0].isdigit() and f or int(f, 10) for f in _rc.findall(s) ] def natsort(lst): """Natural in-place sort, case-sensitive.""" lst.sort(key = natsort_key) def natsorted(lst): """Return copy of list, sorted in natural order, case-sensitive. 
>>> natsorted(['ver-1.1', 'ver-1.11', '', 'ver-1.0']) ['', 'ver-1.0', 'ver-1.1', 'ver-1.11'] """ lst = lst[:] natsort(lst) return lst # case-insensitive api def natsort_key_icase(s): """Split string to numeric and non-numeric fragments.""" return natsort_key(s.lower()) def natsort_icase(lst): """Natural in-place sort, case-sensitive.""" lst.sort(key = natsort_key_icase) def natsorted_icase(lst): """Return copy of list, sorted in natural order, case-sensitive. >>> natsorted_icase(['Ver-1.1', 'vEr-1.11', '', 'veR-1.0']) ['', 'veR-1.0', 'Ver-1.1', 'vEr-1.11'] """ lst = lst[:] natsort_icase(lst) return lst # run doctest if __name__ == '__main__': import doctest doctest.testmod() skytools-3.2.6/python/skytools/__init__.py0000644000000000000000000002245712426435645015645 0ustar """Tools for Python database scripts.""" _symbols = { # skytools.adminscript 'AdminScript': 'skytools.adminscript:AdminScript', # skytools.config 'Config': 'skytools.config:Config', # skytools.dbservice 'DBService': 'skytools.dbservice:DBService', 'ServiceContext': 'skytools.dbservice:ServiceContext', 'TableAPI': 'skytools.dbservice:TableAPI', 'get_record': 'skytools.dbservice:get_record', 'get_record_list': 'skytools.dbservice:get_record_list', 'make_record': 'skytools.dbservice:make_record', 'make_record_array': 'skytools.dbservice:make_record_array', # skytools.dbstruct 'SeqStruct': 'skytools.dbstruct:SeqStruct', 'TableStruct': 'skytools.dbstruct:TableStruct', 'T_ALL': 'skytools.dbstruct:T_ALL', 'T_CONSTRAINT': 'skytools.dbstruct:T_CONSTRAINT', 'T_DEFAULT': 'skytools.dbstruct:T_DEFAULT', 'T_GRANT': 'skytools.dbstruct:T_GRANT', 'T_INDEX': 'skytools.dbstruct:T_INDEX', 'T_OWNER': 'skytools.dbstruct:T_OWNER', 'T_PARENT': 'skytools.dbstruct:T_PARENT', 'T_PKEY': 'skytools.dbstruct:T_PKEY', 'T_RULE': 'skytools.dbstruct:T_RULE', 'T_SEQUENCE': 'skytools.dbstruct:T_SEQUENCE', 'T_TABLE': 'skytools.dbstruct:T_TABLE', 'T_TRIGGER': 'skytools.dbstruct:T_TRIGGER', # skytools.fileutil 'signal_pidfile': 
'skytools.fileutil:signal_pidfile', 'write_atomic': 'skytools.fileutil:write_atomic', # skytools.gzlog 'gzip_append': 'skytools.gzlog:gzip_append', # skytools.hashtext 'hashtext_old': 'skytools.hashtext:hashtext_old', 'hashtext_new': 'skytools.hashtext:hashtext_new', # skytools.natsort 'natsort': 'skytools.natsort:natsort', 'natsort_icase': 'skytools.natsort:natsort_icase', 'natsorted': 'skytools.natsort:natsorted', 'natsorted_icase': 'skytools.natsort:natsorted_icase', 'natsort_key': 'skytools.natsort:natsort_key', 'natsort_key_icase': 'skytools.natsort:natsort_key_icase', # skytools.parsing 'dedent': 'skytools.parsing:dedent', 'hsize_to_bytes': 'skytools.parsing:hsize_to_bytes', 'merge_connect_string': 'skytools.parsing:merge_connect_string', 'parse_acl': 'skytools.parsing:parse_acl', 'parse_connect_string': 'skytools.parsing:parse_connect_string', 'parse_logtriga_sql': 'skytools.parsing:parse_logtriga_sql', 'parse_pgarray': 'skytools.parsing:parse_pgarray', 'parse_sqltriga_sql': 'skytools.parsing:parse_sqltriga_sql', 'parse_statements': 'skytools.parsing:parse_statements', 'parse_tabbed_table': 'skytools.parsing:parse_tabbed_table', 'sql_tokenizer': 'skytools.parsing:sql_tokenizer', # skytools.psycopgwrapper 'connect_database': 'skytools.psycopgwrapper:connect_database', 'DBError': 'skytools.psycopgwrapper:DBError', 'I_AUTOCOMMIT': 'skytools.psycopgwrapper:I_AUTOCOMMIT', 'I_READ_COMMITTED': 'skytools.psycopgwrapper:I_READ_COMMITTED', 'I_REPEATABLE_READ': 'skytools.psycopgwrapper:I_REPEATABLE_READ', 'I_SERIALIZABLE': 'skytools.psycopgwrapper:I_SERIALIZABLE', # skytools.querybuilder 'PLPyQuery': 'skytools.querybuilder:PLPyQuery', 'PLPyQueryBuilder': 'skytools.querybuilder:PLPyQueryBuilder', 'QueryBuilder': 'skytools.querybuilder:QueryBuilder', 'plpy_exec': 'skytools.querybuilder:plpy_exec', 'run_exists': 'skytools.querybuilder:run_exists', 'run_lookup': 'skytools.querybuilder:run_lookup', 'run_query': 'skytools.querybuilder:run_query', 'run_query_row': 
'skytools.querybuilder:run_query_row', # skytools.quoting 'db_urldecode': 'skytools.quoting:db_urldecode', 'db_urlencode': 'skytools.quoting:db_urlencode', 'json_decode': 'skytools.quoting:json_decode', 'json_encode': 'skytools.quoting:json_encode', 'make_pgarray': 'skytools.quoting:make_pgarray', 'quote_bytea_copy': 'skytools.quoting:quote_bytea_copy', 'quote_bytea_literal': 'skytools.quoting:quote_bytea_literal', 'quote_bytea_raw': 'skytools.quoting:quote_bytea_raw', 'quote_copy': 'skytools.quoting:quote_copy', 'quote_fqident': 'skytools.quoting:quote_fqident', 'quote_ident': 'skytools.quoting:quote_ident', 'quote_json': 'skytools.quoting:quote_json', 'quote_literal': 'skytools.quoting:quote_literal', 'quote_statement': 'skytools.quoting:quote_statement', 'unescape': 'skytools.quoting:unescape', 'unescape_copy': 'skytools.quoting:unescape_copy', 'unquote_fqident': 'skytools.quoting:unquote_fqident', 'unquote_ident': 'skytools.quoting:unquote_ident', 'unquote_literal': 'skytools.quoting:unquote_literal', # skytools.scripting 'BaseScript': 'skytools.scripting:BaseScript', 'daemonize': 'skytools.scripting:daemonize', 'DBScript': 'skytools.scripting:DBScript', 'UsageError': 'skytools.scripting:UsageError', # skytools.skylog 'getLogger': 'skytools.skylog:getLogger', # skytools.sockutil 'set_cloexec': 'skytools.sockutil:set_cloexec', 'set_nonblocking': 'skytools.sockutil:set_nonblocking', 'set_tcp_keepalive': 'skytools.sockutil:set_tcp_keepalive', # skytools.sqltools 'dbdict': 'skytools.sqltools:dbdict', 'CopyPipe': 'skytools.sqltools:CopyPipe', 'DBFunction': 'skytools.sqltools:DBFunction', 'DBLanguage': 'skytools.sqltools:DBLanguage', 'DBObject': 'skytools.sqltools:DBObject', 'DBSchema': 'skytools.sqltools:DBSchema', 'DBTable': 'skytools.sqltools:DBTable', 'Snapshot': 'skytools.sqltools:Snapshot', 'db_install': 'skytools.sqltools:db_install', 'exists_function': 'skytools.sqltools:exists_function', 'exists_language': 'skytools.sqltools:exists_language', 
'exists_schema': 'skytools.sqltools:exists_schema', 'exists_sequence': 'skytools.sqltools:exists_sequence', 'exists_table': 'skytools.sqltools:exists_table', 'exists_temp_table': 'skytools.sqltools:exists_temp_table', 'exists_type': 'skytools.sqltools:exists_type', 'exists_view': 'skytools.sqltools:exists_view', 'fq_name': 'skytools.sqltools:fq_name', 'fq_name_parts': 'skytools.sqltools:fq_name_parts', 'full_copy': 'skytools.sqltools:full_copy', 'get_table_columns': 'skytools.sqltools:get_table_columns', 'get_table_oid': 'skytools.sqltools:get_table_oid', 'get_table_pkeys': 'skytools.sqltools:get_table_pkeys', 'installer_apply_file': 'skytools.sqltools:installer_apply_file', 'installer_find_file': 'skytools.sqltools:installer_find_file', 'magic_insert': 'skytools.sqltools:magic_insert', 'mk_delete_sql': 'skytools.sqltools:mk_delete_sql', 'mk_insert_sql': 'skytools.sqltools:mk_insert_sql', 'mk_update_sql': 'skytools.sqltools:mk_update_sql', # skytools.timeutil 'FixedOffsetTimezone': 'skytools.timeutil:FixedOffsetTimezone', 'datetime_to_timestamp': 'skytools.timeutil:datetime_to_timestamp', 'parse_iso_timestamp': 'skytools.timeutil:parse_iso_timestamp', # skytools.utf8 'safe_utf8_decode': 'skytools.utf8:safe_utf8_decode', } __all__ = _symbols.keys() _symbols['__version__'] = 'skytools.installer_config:package_version' if 1: # lazy-import exported vars import skytools.apipkg as _apipkg _apipkg.initpkg(__name__, _symbols, {'apipkg': _apipkg}) elif 1: # import everything immediately from skytools.quoting import * from skytools.sqltools import * from skytools.scripting import * from skytools.adminscript import * from skytools.config import * from skytools.dbservice import * from skytools.dbstruct import * from skytools.fileutil import * from skytools.gzlog import * from skytools.hashtext import * from skytools.natsort import * from skytools.parsing import * from skytools.psycopgwrapper import * from skytools.querybuilder import * from skytools.skylog import * from 
skytools.sockutil import * from skytools.timeutil import * from skytools.utf8 import * else: from skytools.quoting import * from skytools.sqltools import * from skytools.scripting import * # compare apipkg list to submodule exports xall = [] import skytools.adminscript import skytools.config import skytools.dbservice import skytools.dbstruct import skytools.fileutil import skytools.gzlog import skytools.hashtext import skytools.natsort import skytools.parsing import skytools.psycopgwrapper import skytools.querybuilder import skytools.quoting import skytools.scripting import skytools.skylog import skytools.sockutil import skytools.sqltools import skytools.timeutil import skytools.utf8 xall = ( skytools.adminscript.__all__ + skytools.config.__all__ + skytools.dbservice.__all__ + skytools.dbstruct.__all__ + skytools.fileutil.__all__ + skytools.gzlog.__all__ + skytools.hashtext.__all__ + skytools.natsort.__all__ + skytools.parsing.__all__ + skytools.psycopgwrapper.__all__ + skytools.querybuilder.__all__ + skytools.quoting.__all__ + skytools.scripting.__all__ + skytools.skylog.__all__ + skytools.sockutil.__all__ + skytools.sqltools.__all__ + skytools.timeutil.__all__ + skytools.utf8.__all__ ) for k in __all__: if k not in xall: print '%s missing from __all__?' % k for k in xall: if k not in __all__: print '%s missing from top-level?' % k skytools-3.2.6/python/conf/0000755000000000000000000000000012426435645012560 5ustar skytools-3.2.6/python/conf/skylog.ini0000644000000000000000000000325312426435645014574 0ustar ; notes: ; - 'args' is mandatory in [handler_*] sections ; - in lists there must not be spaces ; ; top-level config ; ; list of all loggers [loggers] keys=root ; root logger sees everything. there can be per-job configs by ; specifying loggers with job_name of the script. ; list of all handlers [handlers] ;; seems logger module immediately initialized all handlers, ;; whether they are actually used or not. 
so better ;; keep this list in sync with actual handler list. ;keys=stderr,logdb,logsrv,logfile keys=stderr ; list of all formatters [formatters] keys=short,long,syslog,none ; ; map specific loggers to specifig handlers ; [logger_root] level=DEBUG ;handlers=stderr,logdb,logsrv,logfile handlers=stderr ; ; configure formatters ; [formatter_short] format=%(asctime)s %(levelname)s %(message)s datefmt=%H:%M [formatter_long] format=%(asctime)s %(process)s %(levelname)s %(message)s [formatter_none] format=%(message)s [formatter_syslog] format=%(hostname)s %(service_name)s %(job_name)s %(message)s ; ; configure handlers ; ; file. args: stream [handler_stderr] class=StreamHandler args=(sys.stderr,) formatter=short ; log into db. args: conn_string [handler_logdb] class=skylog.LogDBHandler args=("host=127.0.0.1 port=5432 user=logger dbname=logdb",) formatter=none level=INFO ; JSON messages over UDP. args: host, port [handler_logsrv] class=skylog.UdpLogServerHandler args=('127.0.0.1', 6666) formatter=none ; rotating logfile. 
args: filename, maxsize, maxcount [handler_logfile] class=skylog.EasyRotatingFileHandler args=('~/log/%(job_name)s.log', 100*1024*1024, 3) formatter=long [handler_syslog] class=skylog.SysLogHandler args=(('localhost', 514),) formatter=syslog skytools-3.2.6/python/conf/wal-slave.ini0000644000000000000000000000174012426435645015156 0ustar [walmgr] job_name = wal-slave logfile = ~/log/%(job_name)s.log use_skylog = 1 slave_data = /var/lib/postgresql/8.3/main slave_bin = /usr/lib/postgresql/8.3/bin slave_stop_cmd = /etc/init.d/postgresql-8.3 stop slave_start_cmd = /etc/init.d/postgresql-8.3 start slave_config_dir = /etc/postgresql/8.3/main # alternative pg_xlog directory for slave, symlinked to pg_xlog on restore #slave_pg_xlog = /vol2/pg_xlog walmgr_data = ~/walshipping completed_wals = %(walmgr_data)s/logs.complete partial_wals = %(walmgr_data)s/logs.partial full_backup = %(walmgr_data)s/data.master config_backup = %(walmgr_data)s/config.backup backup_datadir = yes keep_backups = 0 archive_command = # primary database connect string for hot standby -- enabling # this will cause the slave to be started in hot standby mode. #primary_conninfo = host=master port=5432 user=postgres skytools-3.2.6/python/conf/wal-master.ini0000644000000000000000000000243212426435645015336 0ustar [walmgr] job_name = wal-master logfile = ~/log/%(job_name)s.log pidfile = ~/pid/%(job_name)s.pid use_skylog = 1 master_db = dbname=template1 master_data = /var/lib/postgresql/8.3/main master_config = /etc/postgresql/8.3/main/postgresql.conf master_bin = /usr/lib/postgresql/8.3/bin # set this only if you can afford database restarts during setup and stop. 
#master_restart_cmd = /etc/init.d/postgresql-8.3 restart slave = slave-host slave_config = /var/lib/postgresql/conf/wal-slave.ini walmgr_data = /var/lib/postgresql/walshipping completed_wals = %(walmgr_data)s/logs.complete partial_wals = %(walmgr_data)s/logs.partial full_backup = %(walmgr_data)s/data.master config_backup = %(walmgr_data)s/config.backup # syncdaemon update frequency loop_delay = 10.0 # use record based shipping available since 8.2 use_xlog_functions = 0 # pass -z to rsync, useful on low bandwidth links compression = 0 # keep symlinks for pg_xlog and pg_log keep_symlinks = 1 # tell walmgr to set wal_level to hot_standby during setup #hot_standby = 1 # periodic sync #command_interval = 600 #periodic_command = /var/lib/postgresql/walshipping/periodic.sh skytools-3.2.6/python/londiste.py0000755000000000000000000001616112426435645014036 0ustar #! /usr/bin/env python """Londiste launcher. """ import sys, os, os.path, optparse import pkgloader pkgloader.require('skytools', '3.0') import skytools # python 2.3 will try londiste.py first... if os.path.exists(os.path.join(sys.path[0], 'londiste.py')) \ and not os.path.isdir(os.path.join(sys.path[0], 'londiste')): del sys.path[0] import londiste, pgq.cascade.admin command_usage = pgq.cascade.admin.command_usage + """ Replication Daemon: worker replay events to subscriber Replication Administration: add-table TBL ... add table to queue remove-table TBL ... remove table from queue change-handler TBL change handler for the table add-seq SEQ ... add sequence to provider remove-seq SEQ ... remove sequence from provider tables show all tables on provider seqs show all sequences on provider missing list tables subscriber has not yet attached to resync TBL ... do full copy again wait-sync wait until all tables are in sync Replication Extra: check compare table structure on both sides fkeys print out fkey drop/create commands compare [TBL ...] compare table contents on both sides repair [TBL ...] 
repair data on subscriber execute [FILE ...] execute SQL files on set show-handlers [..] show info about all or specific handler Internal Commands: copy copy table logic """ cmd_handlers = ( (('create-root', 'create-branch', 'create-leaf', 'members', 'tag-dead', 'tag-alive', 'change-provider', 'rename-node', 'status', 'node-status', 'pause', 'resume', 'node-info', 'drop-node', 'takeover', 'resurrect'), londiste.LondisteSetup), (('add-table', 'remove-table', 'change-handler', 'add-seq', 'remove-seq', 'tables', 'seqs', 'missing', 'resync', 'wait-sync', 'wait-root', 'wait-provider', 'check', 'fkeys', 'execute'), londiste.LondisteSetup), (('show-handlers',), londiste.LondisteSetup), (('worker',), londiste.Replicator), (('compare',), londiste.Comparator), (('repair',), londiste.Repairer), (('copy',), londiste.CopyTable), ) class Londiste(skytools.DBScript): def __init__(self, args): self.full_args = args skytools.DBScript.__init__(self, 'londiste3', args) if len(self.args) < 2: print("need command") sys.exit(1) cmd = self.args[1] self.script = None for names, cls in cmd_handlers: if cmd in names: self.script = cls(args) break if not self.script: print("Unknown command '%s', use --help for help" % cmd) sys.exit(1) def start(self): self.script.start() def print_ini(self): """Let the Replicator print the default config.""" londiste.Replicator(self.full_args) def init_optparse(self, parser=None): p = super(Londiste, self).init_optparse(parser) p.set_usage(command_usage.strip()) g = optparse.OptionGroup(p, "options for cascading") g.add_option("--provider", help = "init: upstream node temp connect string") g.add_option("--target", metavar = "NODE", help = "switchover: target node") g.add_option("--merge", metavar = "QUEUE", help = "create-leaf: combined queue name") g.add_option("--dead", metavar = "NODE", action = 'append', help = "cascade: assume node is dead") g.add_option("--dead-root", action = 'store_true', help = "takeover: old node was root") 
g.add_option("--dead-branch", action = 'store_true', help = "takeover: old node was branch") g.add_option("--sync-watermark", metavar = "NODES", help = "create-branch: list of node names to sync wm with") p.add_option_group(g) g = optparse.OptionGroup(p, "repair queue position") g.add_option("--rewind", action = "store_true", help = "change queue position according to destination") g.add_option("--reset", action = "store_true", help = "reset queue position on destination side") p.add_option_group(g) g = optparse.OptionGroup(p, "options for add") g.add_option("--all", action="store_true", help = "add: include all possible tables") g.add_option("--wait-sync", action="store_true", help = "add: wait until all tables are in sync"), g.add_option("--dest-table", metavar = "NAME", help = "add: redirect changes to different table") g.add_option("--expect-sync", action="store_true", dest="expect_sync", help = "add: no copy needed", default=False) g.add_option("--skip-truncate", action="store_true", dest="skip_truncate", help = "add: keep old data", default=False) g.add_option("--create", action="store_true", help = "add: create table/seq if not exist, with minimal schema") g.add_option("--create-full", action="store_true", help = "add: create table/seq if not exist, with full schema") g.add_option("--trigger-flags", help="add: set trigger flags (BAIUDLQ)") g.add_option("--trigger-arg", action="append", help="add: custom trigger arg (can be specified multiple times)") g.add_option("--no-triggers", action="store_true", help="add: do not put triggers on table (makes sense on leaf)") g.add_option("--handler", action="store", help="add: custom handler for table") g.add_option("--handler-arg", action="append", help="add: argument to custom handler") g.add_option("--find-copy-node", dest="find_copy_node", action="store_true", help = "add: walk upstream to find node to copy from") g.add_option("--copy-node", metavar = "NODE", dest="copy_node", help = "add: use NODE as source for 
initial COPY") g.add_option("--merge-all", action="store_true", help="merge tables from all source queues", default=False) g.add_option("--no-merge", action="store_true", help="don't merge tables from source queues", default=False) g.add_option("--max-parallel-copy", metavar = "NUM", type = "int", help="max number of parallel copy processes") g.add_option("--skip-non-existing", action="store_true", help="add: skip object that does not exist") p.add_option_group(g) g = optparse.OptionGroup(p, "other options") g.add_option("--force", action="store_true", help = "add: ignore table differences, repair: ignore lag") g.add_option("--apply", action = "store_true", help="repair: apply fixes automatically") g.add_option("--count-only", action="store_true", help="compare: just count rows, do not compare data") p.add_option_group(g) return p if __name__ == '__main__': script = Londiste(sys.argv[1:]) script.start() skytools-3.2.6/tests/0000755000000000000000000000000012426435645011454 5ustar skytools-3.2.6/tests/zcheck.sh0000755000000000000000000000010012426435645013251 0ustar #! /bin/sh grep -E 'ERR|WARN|CRIT' log/*.log || echo "All OK" skytools-3.2.6/tests/merge/0000755000000000000000000000000012426435645012553 5ustar skytools-3.2.6/tests/merge/overview.sh0000755000000000000000000000034512426435645014762 0ustar #! /bin/sh for db in part1 full1 full2; do echo "==== $db ===" psql -d $db -c "select * from pgq.get_consumer_info() where not consumer_name like '.%'" psql -d $db -c "select * from pgq_node.local_state order by 1,2" done skytools-3.2.6/tests/merge/addcol-data2.sql0000644000000000000000000000011112426435645015504 0ustar --*-- Local-Table: mydata alter table @mydata@ add column data2 text; skytools-3.2.6/tests/merge/init.sh0000755000000000000000000000031712426435645014056 0ustar #! /bin/sh . 
../env.sh lst="part1 part2 part3 part4 full1 full2 full3 full4" ../zstop.sh for db in $lst; do echo dropdb $db dropdb $db done for db in $lst; do echo createdb $db createdb $db done skytools-3.2.6/tests/merge/regen.sh0000755000000000000000000001165312426435645014220 0ustar #! /bin/sh . ../testlib.sh title "Merge" part_list="part1 part2 part3 part4" full_list="full1 full2 full3 full4" merge_list="" for dst in $full_list; do for src in $part_list; do merge_list="$merge_list ${src}_${dst}" done done all_list="$part_list $full_list" kdb_list="`echo $all_list|sed 's/ /,/g'`" for db in $part_list $full_list; do cleardb $db done msg "clean logs" rm -f log/*.log msg "Create configs" # create ticker conf cat > conf/pgqd.ini << EOF [pgqd] database_list = $kdb_list logfile = log/pgqd.log pidfile = pid/pgqd.pid EOF # partition replicas for db in $part_list; do # londiste on part node cat > conf/londiste_$db.ini << EOF [londiste3] job_name = londiste_$db db = dbname=$db queue_name = replika_$db logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid EOF # londiste on combined nodes for dst in full1 full2; do cat > conf/londiste_${db}_${dst}.ini << EOF [londiste3] job_name = londiste_${db}_${dst} db = dbname=$dst queue_name = replika_$db logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid EOF done done # full replicas for db in $full_list; do cat > conf/londiste_$db.ini << EOF [londiste3] job_name = londiste_$db db = dbname=$db queue_name = replika logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid EOF done set -e msg "Create nodes for merged queue" run londiste3 $v conf/londiste_full1.ini create-root fnode1 'dbname=full1' run londiste3 $v conf/londiste_full2.ini create-branch fnode2 'dbname=full2' --provider='dbname=full1' run londiste3 $v conf/londiste_full3.ini create-branch fnode3 'dbname=full3' --provider='dbname=full1' run londiste3 $v conf/londiste_full4.ini create-leaf fnode4 'dbname=full4' --provider='dbname=full2' msg "Create nodes for 
partition queues" run londiste3 $v conf/londiste_part1.ini create-root p1root 'dbname=part1' run londiste3 $v conf/londiste_part2.ini create-root p2root 'dbname=part2' run londiste3 $v conf/londiste_part3.ini create-root p3root 'dbname=part3' run londiste3 $v conf/londiste_part4.ini create-root p4root 'dbname=part4' msg "Create merge nodes for partition queues" for dst in full1 full2; do for src in $part_list; do run londiste3 $v conf/londiste_${src}_${dst}.ini \ create-leaf merge_${src}_${dst} "dbname=$dst" \ --provider="dbname=$src" --merge="replika" done done msg "Tune PgQ" for db in part1 part2 part3 part4 full1; do run_sql $db "update pgq.queue set queue_ticker_idle_period='3 secs'" done msg "Launch ticker" run pgqd $v -d conf/pgqd.ini msg "Launch londiste worker" for db in $all_list; do run londiste3 $v -d conf/londiste_$db.ini worker done msg "Launch merge londiste" for dst in full1 full2; do for src in $part_list; do run londiste3 $v -d conf/londiste_${src}_${dst}.ini worker done done msg "Create table in partition nodes" for db in $part_list; do run_sql "$db" "create table mydata (id int4 primary key, data text)" done msg "Register table in partition nodes" for db in $part_list; do run londiste3 $v conf/londiste_$db.ini add-table mydata done msg "Wait until add-table events are distributed to leafs" parts=$(echo "$part_list"|wc -w) for db in full1 full2; do cnt=0 while test $cnt -ne $parts; do sleep 5 cnt=`psql ${db} -Atc "select count(*)-1 from londiste.table_info"` echo "$db cnt=$cnt parts=$parts" done done msg "Insert few rows" for n in 1 2 3 4; do run_sql part$n "insert into mydata values ($n, 'part$n')" done msg "Create table and register it in merge nodes" run_sql full1 "create table mydata (id int4 primary key, data text)" run londiste3 $v conf/londiste_full1.ini add-table mydata run londiste3 $v conf/londiste_part1_full1.ini add-table mydata --merge-all msg "Wait until table is in sync on combined-root" cnt=0 while test $cnt -ne 5; do sleep 5 
cnt=`psql -A -t -d full1 -c "select count(*) from londiste.table_info where merge_state = 'ok'"` echo "cnt=$cnt" done msg "Create table and register it in full nodes" for db in full2; do run londiste3 $v conf/londiste_$db.ini add-table mydata --create run londiste3 $v conf/londiste_part1_${db}.ini add-table mydata --merge-all done for db in full3 full4; do run londiste3 $v conf/londiste_$db.ini add-table mydata --create done msg "Sleep a bit" run sleep 10 msg "Insert few rows" for n in 1 2 3 4; do run_sql part$n "insert into mydata values (4 + $n, 'part$n')" done run sleep 10 msg "Now check if data apprered" for db in full1; do run_sql $db "select * from mydata order by id" run_sql $db "select * from londiste.table_info order by queue_name" done run_sql full1 "select * from londiste.get_table_list('replika_part1')" run_sql full2 "select * from londiste.get_table_list('replika_part2')" ../zcheck.sh msg "Test EXECUTE through cascade" for db in part1 part2 part3 part4; do run londiste3 $v conf/londiste_$db.ini execute addcol-data2.sql done msg "Sleep a bit" run sleep 10 psql -d part1 -c '\d mydata' psql -d full1 -c '\d mydata' psql -d part1 -c '\d mydata' ../zcheck.sh skytools-3.2.6/tests/env.sh0000644000000000000000000000101712426435645012577 0ustar for dir in . .. ../.. do config=$dir/config.mak test -f $config && break done pfx=`grep ^prefix $config | awk '{ print $3}'` pyver=`python -V 2>&1 | sed 's/Python \([0-9]*.[0-9]*\).*/\1/'` PYTHONPATH=$pfx/lib/python$pyver/site-packages:$PYTHONPATH PATH=$pfx/bin:$PATH #PYTHONPATH=../../python:$PYTHONPATH #PATH=../../python:../../python/bin:../../scripts:$PATH #LD_LIBRARY_PATH=/opt/apps/py26/lib:$LD_LIBRARY_PATH #PATH=/opt/apps/py26/bin:$PATH export PYTHONPATH PATH LD_LIBRARY_PATH PATH PGHOST=localhost export PGHOST skytools-3.2.6/tests/newloader/0000755000000000000000000000000012426435645013434 5ustar skytools-3.2.6/tests/newloader/zcheck.sh0000755000000000000000000000010012426435645015231 0ustar #! 
/bin/sh grep -E 'ERR|WARN|CRIT' log/*.log || echo "All OK" skytools-3.2.6/tests/newloader/triggers.sql0000644000000000000000000000123212426435645016001 0ustar create trigger logger after insert or update or delete on data.simple_tbl for each row execute procedure pgq.logutriga('loaderq'); create trigger logger after insert or update or delete on data.bulk_tbl for each row execute procedure pgq.logutriga('loaderq'); create trigger logger after insert or update or delete on data.keep_all_tbl for each row execute procedure pgq.logutriga('loaderq'); create trigger logger after insert or update or delete on data.keep_latest_tbl for each row execute procedure pgq.logutriga('loaderq'); create trigger logger after insert or update or delete on data.random_tbl for each row execute procedure pgq.logutriga('loaderq'); skytools-3.2.6/tests/newloader/tables.sql0000644000000000000000000000134712426435645015434 0ustar set client_min_messages = 'warning'; create schema data; create table data.simple_tbl ( username text not null, contactname text not null, data text, primary key (username, contactname) ); create table data.bulk_tbl ( id serial primary key, data text ); create table data.keep_all_tbl ( id serial primary key, username text not null, tstamp timestamptz not null default now(), data text ); create table data.keep_latest_tbl ( id serial primary key, username text not null, tstamp timestamptz not null default now(), data text ); create table data.random_tbl ( id serial primary key, username text not null, tstamp timestamptz not null default now(), data text ); skytools-3.2.6/tests/newloader/zstop.sh0000755000000000000000000000025612426435645015155 0ustar #! /bin/sh #. 
../env.sh for p in pid/*.pid*; do test -f "$p" || continue pid=`cat "$p"` test -d "/proc/$pid" || { rm -f "$p" continue } kill "$pid" done skytools-3.2.6/tests/newloader/conf/0000755000000000000000000000000012426435645014361 5ustar skytools-3.2.6/tests/newloader/conf/setadm_loaderq.ini0000644000000000000000000000023512426435645020046 0ustar [cascade_admin] job_name = setadm_loaderq node_db = dbname=loadersrc queue_name = loaderq logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid skytools-3.2.6/tests/newloader/conf/loader_src.ini0000644000000000000000000000022012426435645017171 0ustar [queue_loader] job_name = loader_src db = dbname=loadersrc queue_name = loaderq logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid skytools-3.2.6/tests/newloader/conf/loader_dst.ini0000644000000000000000000000463512426435645017212 0ustar [queue_loader] job_name = loader_dst db = dbname=loaderdst queue_name = loaderq logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid rename_tables = [data.simple_tbl] table_mode = direct [data.bulk_tbl] table_mode = direct row_mode = bulk [data.keep_all_tbl] table_mode = split row_mode = bulk split_mode = by-date-field:tstamp ### Non-inherited partitions split_part_template = create table %%(part)s (like %%(parent)s); alter table only %%(part)s add primary key (%%(pkey)s); [data.keep_latest_tbl] table_mode = split row_mode = bulk split_mode = by-date-field:tstamp ### Inherited partitions split_part_template = create table %%(part)s () inherits (%%(parent)s); alter table only %%(part)s add primary key (%%(pkey)s); [DEFAULT] # fields - which fields to send through #fields = col1, col2, col3:renamed3 #fields = * # table_mode - how to handle a table # # ignore - ignore this table # direct - update table directly # split - split data into partitions #table_mode = ignore # split_mode - how to split, if requested # # by-batch-time: use batch time for splitting # by-event-time: use event time for splitting # by-date-field:fld - 
use fld for splitting #split_mode = by-batch-time # split_part - partition name format # # %(table_name)s %(year)s %(month)s %(day)s %(hour)s #split_part = %(table_name)s_%(year)s_%(month)s_%(day)s # split_part_template - How to create new partition tables # # Available fields: # %(part)s # %(parent)s # %(pkey)s # ### Non-inherited partitions #split_part_template = # create table %%(part)s (like %%(parent)s); # alter table only %%(part)s add primary key (%%(pkey)s); # ### Inherited partitions #split_part_template = # create table %%(part)s () inherits (%%(parent)s); # alter table only %%(part)s add primary key (%%(pkey)s); # row_mode - How to apply the events # # plain - each event creates SQL statement to run # keep_latest - change updates to DELETE + INSERT # keep_all - change updates to inserts, ignore deletes # bulk - instead of statement-per-row, do bulk updates #row_mode = plain # bulk_mode - How to do the bulk update # # correct - inserts as COPY into table, # update as COPY into temp table and single UPDATE from there # delete as COPY into temp table and single DELETE from there # delete - as 'correct', but do update as DELETE + COPY # merged - as 'delete', but merge insert rows with update rows #bulk_mode=correct skytools-3.2.6/tests/newloader/conf/ticker_loadersrc.ini0000644000000000000000000000021212426435645020374 0ustar [pgqadm] job_name = ticker_loadersrc db = dbname=loadersrc loop_delay = 0.5 logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid skytools-3.2.6/tests/newloader/init.sh0000755000000000000000000000024612426435645014740 0ustar #! /bin/sh . ../env.sh lst="loadersrc loaderdst" for db in $lst; do echo dropdb $db dropdb $db done for db in $lst; do echo createdb $db createdb $db done skytools-3.2.6/tests/newloader/regen.sh0000755000000000000000000000230212426435645015070 0ustar #! /bin/sh . 
../env.sh mkdir -p log pid conf ./zstop.sh v= v=-q v=-v (cd ../..; make -s python-install ) echo "" cleardb() { echo "Clearing database $1" psql -q -d $1 -c ' set client_min_messages=warning; drop schema if exists londiste cascade; drop schema if exists pgq_node cascade; drop schema if exists pgq cascade; drop schema if exists data cascade; ' } run() { echo "$ $*" "$@" } db_list="loadersrc loaderdst" for db in $db_list; do cleardb $db done echo "clean logs" rm -f log/*.log set -e run setadm $v conf/setadm_loaderq.ini create-root ldr-src 'dbname=loadersrc' --worker=loader_src run setadm $v conf/setadm_loaderq.ini create-leaf ldr-dst 'dbname=loaderdst' --worker=loader_dst --provider="dbname=loadersrc" run pgqadm $v conf/ticker_loadersrc.ini -d ticker run queue_loader $v -d conf/loader_src.ini run queue_loader $v -d conf/loader_dst.ini run psql -d loadersrc -f tables.sql run psql -d loadersrc -f triggers.sql run psql -d loaderdst -f tables.sql run psql -d loadersrc -f send.data.sql run psql -d loadersrc -f send.data.sql run psql -d loadersrc -f send.data.sql run sleep 2 run setadm $v conf/setadm_loaderq.ini status ./zcheck.sh skytools-3.2.6/tests/newloader/send.data.sql0000644000000000000000000000106712426435645016022 0ustar insert into data.simple_tbl (username, contactname, data) values ('randuser'||random()::text, 'randcontact'||random()::text, 'link'); /* insert into data.simple_tbl (username, contactname, data) values ('sameuser', 'samecontact', 'link'); update data.simple_tbl */ insert into data.bulk_tbl (data) values ('newdata'); insert into data.keep_all_tbl (username, data) values ('sameuser', 'newdata'); insert into data.keep_latest_tbl (username, data) values ('sameuser', 'newdata'); insert into data.random_tbl (username, data) values ('sameuser', 'newdata'); skytools-3.2.6/tests/walmgr/0000755000000000000000000000000012426435645012745 5ustar skytools-3.2.6/tests/walmgr/conf.master/0000755000000000000000000000000012426435645015164 5ustar 
skytools-3.2.6/tests/walmgr/conf.master/pg_hba.conf0000644000000000000000000000650412426435645017260 0ustar # PostgreSQL Client Authentication Configuration File # =================================================== # # Refer to the PostgreSQL Administrator's Guide, chapter "Client # Authentication" for a complete description. A short synopsis # follows. # # This file controls: which hosts are allowed to connect, how clients # are authenticated, which PostgreSQL user names they can use, which # databases they can access. Records take one of these forms: # # local DATABASE USER METHOD [OPTION] # host DATABASE USER CIDR-ADDRESS METHOD [OPTION] # hostssl DATABASE USER CIDR-ADDRESS METHOD [OPTION] # hostnossl DATABASE USER CIDR-ADDRESS METHOD [OPTION] # # (The uppercase items must be replaced by actual values.) # # The first field is the connection type: "local" is a Unix-domain socket, # "host" is either a plain or SSL-encrypted TCP/IP socket, "hostssl" is an # SSL-encrypted TCP/IP socket, and "hostnossl" is a plain TCP/IP socket. # # DATABASE can be "all", "sameuser", "samerole", a database name, or # a comma-separated list thereof. # # USER can be "all", a user name, a group name prefixed with "+", or # a comma-separated list thereof. In both the DATABASE and USER fields # you can also write a file name prefixed with "@" to include names from # a separate file. # # CIDR-ADDRESS specifies the set of hosts the record matches. # It is made up of an IP address and a CIDR mask that is an integer # (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that specifies # the number of significant bits in the mask. Alternatively, you can write # an IP address and netmask in separate columns to specify the set of hosts. # # METHOD can be "trust", "reject", "md5", "crypt", "password", # "krb5", "ident", or "pam". Note that "password" sends passwords # in clear text; "md5" is preferred since it sends encrypted passwords. 
# # OPTION is the ident map or the name of the PAM service, depending on METHOD. # # Database and user names containing spaces, commas, quotes and other special # characters must be quoted. Quoting one of the keywords "all", "sameuser" or # "samerole" makes the name lose its special character, and just match a # database or username with that name. # # This file is read on server startup and when the postmaster receives # a SIGHUP signal. If you edit the file on a running system, you have # to SIGHUP the postmaster for the changes to take effect. You can use # "pg_ctl reload" to do that. # Put your actual configuration here # ---------------------------------- # # If you want to allow non-local connections, you need to add more # "host" records. In that case you will also need to make PostgreSQL listen # on a non-local interface via the listen_addresses configuration parameter, # or via the -i or -h command line switches. # # CAUTION: Configuring the system for local "trust" authentication allows # any local user to connect as any PostgreSQL user, including the database # superuser. If you do not trust all your local users, use another # authentication method. # TYPE DATABASE USER CIDR-ADDRESS METHOD # "local" is for Unix domain socket connections only local all all trust # IPv4 local connections: host all all 127.0.0.1/32 trust # IPv6 local connections: host all all ::1/128 trust skytools-3.2.6/tests/walmgr/conf.master/pg_ident.conf0000644000000000000000000000266412426435645017634 0ustar # PostgreSQL Ident Authentication Maps # ==================================== # # Refer to the PostgreSQL Administrator's Guide, chapter "Client # Authentication" for a complete description. A short synopsis # follows. # # This file controls PostgreSQL ident-based authentication. It maps # ident user names (typically Unix user names) to their corresponding # PostgreSQL user names. 
Records are of the form: # # MAPNAME IDENT-USERNAME PG-USERNAME # # (The uppercase quantities must be replaced by actual values.) # # MAPNAME is the (otherwise freely chosen) map name that was used in # pg_hba.conf. IDENT-USERNAME is the detected user name of the # client. PG-USERNAME is the requested PostgreSQL user name. The # existence of a record specifies that IDENT-USERNAME may connect as # PG-USERNAME. Multiple maps may be specified in this file and used # by pg_hba.conf. # # This file is read on server startup and when the postmaster receives # a SIGHUP signal. If you edit the file on a running system, you have # to SIGHUP the postmaster for the changes to take effect. You can use # "pg_ctl reload" to do that. # Put your actual configuration here # ---------------------------------- # # No map names are defined in the default configuration. If all ident # user names and PostgreSQL user names are the same, you don't need # this file. Instead, use the special map name "sameuser" in # pg_hba.conf. 
# MAPNAME IDENT-USERNAME PG-USERNAME skytools-3.2.6/tests/walmgr/conf.master/postgresql.conf0000644000000000000000000000071312426435645020237 0ustar # - Connection Settings - #port = 5432 port = 7200 unix_socket_directory = '/tmp/waltest' #archive_command = '' # command to use to archive a logfile # segment # These settings are initialized by initdb -- they might be changed lc_messages = 'C' # locale for system error message # strings lc_monetary = 'C' # locale for monetary formatting lc_numeric = 'C' # locale for number formatting lc_time = 'C' # locale for time formatting skytools-3.2.6/tests/walmgr/conf.slave/0000755000000000000000000000000012426435645015003 5ustar skytools-3.2.6/tests/walmgr/conf.slave/pg_hba.conf0000644000000000000000000000650412426435645017077 0ustar # PostgreSQL Client Authentication Configuration File # =================================================== # # Refer to the PostgreSQL Administrator's Guide, chapter "Client # Authentication" for a complete description. A short synopsis # follows. # # This file controls: which hosts are allowed to connect, how clients # are authenticated, which PostgreSQL user names they can use, which # databases they can access. Records take one of these forms: # # local DATABASE USER METHOD [OPTION] # host DATABASE USER CIDR-ADDRESS METHOD [OPTION] # hostssl DATABASE USER CIDR-ADDRESS METHOD [OPTION] # hostnossl DATABASE USER CIDR-ADDRESS METHOD [OPTION] # # (The uppercase items must be replaced by actual values.) # # The first field is the connection type: "local" is a Unix-domain socket, # "host" is either a plain or SSL-encrypted TCP/IP socket, "hostssl" is an # SSL-encrypted TCP/IP socket, and "hostnossl" is a plain TCP/IP socket. # # DATABASE can be "all", "sameuser", "samerole", a database name, or # a comma-separated list thereof. # # USER can be "all", a user name, a group name prefixed with "+", or # a comma-separated list thereof. 
In both the DATABASE and USER fields # you can also write a file name prefixed with "@" to include names from # a separate file. # # CIDR-ADDRESS specifies the set of hosts the record matches. # It is made up of an IP address and a CIDR mask that is an integer # (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that specifies # the number of significant bits in the mask. Alternatively, you can write # an IP address and netmask in separate columns to specify the set of hosts. # # METHOD can be "trust", "reject", "md5", "crypt", "password", # "krb5", "ident", or "pam". Note that "password" sends passwords # in clear text; "md5" is preferred since it sends encrypted passwords. # # OPTION is the ident map or the name of the PAM service, depending on METHOD. # # Database and user names containing spaces, commas, quotes and other special # characters must be quoted. Quoting one of the keywords "all", "sameuser" or # "samerole" makes the name lose its special character, and just match a # database or username with that name. # # This file is read on server startup and when the postmaster receives # a SIGHUP signal. If you edit the file on a running system, you have # to SIGHUP the postmaster for the changes to take effect. You can use # "pg_ctl reload" to do that. # Put your actual configuration here # ---------------------------------- # # If you want to allow non-local connections, you need to add more # "host" records. In that case you will also need to make PostgreSQL listen # on a non-local interface via the listen_addresses configuration parameter, # or via the -i or -h command line switches. # # CAUTION: Configuring the system for local "trust" authentication allows # any local user to connect as any PostgreSQL user, including the database # superuser. If you do not trust all your local users, use another # authentication method. 
# TYPE DATABASE USER CIDR-ADDRESS METHOD # "local" is for Unix domain socket connections only local all all trust # IPv4 local connections: host all all 127.0.0.1/32 trust # IPv6 local connections: host all all ::1/128 trust skytools-3.2.6/tests/walmgr/conf.slave/pg_ident.conf0000644000000000000000000000266412426435645017453 0ustar # PostgreSQL Ident Authentication Maps # ==================================== # # Refer to the PostgreSQL Administrator's Guide, chapter "Client # Authentication" for a complete description. A short synopsis # follows. # # This file controls PostgreSQL ident-based authentication. It maps # ident user names (typically Unix user names) to their corresponding # PostgreSQL user names. Records are of the form: # # MAPNAME IDENT-USERNAME PG-USERNAME # # (The uppercase quantities must be replaced by actual values.) # # MAPNAME is the (otherwise freely chosen) map name that was used in # pg_hba.conf. IDENT-USERNAME is the detected user name of the # client. PG-USERNAME is the requested PostgreSQL user name. The # existence of a record specifies that IDENT-USERNAME may connect as # PG-USERNAME. Multiple maps may be specified in this file and used # by pg_hba.conf. # # This file is read on server startup and when the postmaster receives # a SIGHUP signal. If you edit the file on a running system, you have # to SIGHUP the postmaster for the changes to take effect. You can use # "pg_ctl reload" to do that. # Put your actual configuration here # ---------------------------------- # # No map names are defined in the default configuration. If all ident # user names and PostgreSQL user names are the same, you don't need # this file. Instead, use the special map name "sameuser" in # pg_hba.conf. 
# MAPNAME IDENT-USERNAME PG-USERNAME skytools-3.2.6/tests/walmgr/conf.slave/postgresql.conf0000644000000000000000000000115012426435645020052 0ustar #listen_addresses = 'localhost' # what IP address(es) to listen on; # comma-separated list of addresses; # defaults to 'localhost', '*' = all port = 7201 max_connections = 100 #superuser_reserved_connections = 2 unix_socket_directory = '/tmp/waltest' shared_buffers = 1000 # min 16 or max_connections*2, 8KB each # These settings are initialized by initdb -- they might be changed lc_messages = 'C' # locale for system error message # strings lc_monetary = 'C' # locale for monetary formatting lc_numeric = 'C' # locale for number formatting lc_time = 'C' # locale for time formatting skytools-3.2.6/tests/walmgr/run-test.sh0000755000000000000000000000462112426435645015070 0ustar #! /bin/sh set -e . ../env.sh tmp=/tmp/waltest src=$PWD walmgr=$src/../../python/walmgr.py test -f $tmp/data.master/postmaster.pid \ && kill `head -1 $tmp/data.master/postmaster.pid` || true test -f $tmp/data.slave/postmaster.pid \ && kill `head -1 $tmp/data.slave/postmaster.pid` || true rm -rf $tmp mkdir -p $tmp cd $tmp LANG=C PATH=/usr/lib/postgresql/8.2/bin:$PATH export PATH LANG mkdir log slave slave/logs.complete slave/logs.partial # # Prepare configs # ### wal.master.ini ### cat > wal.master.ini < wal.slave.ini < rc.slave < log/initdb.log 2>&1 cp $src/conf.master/*.conf data.master/ pg_ctl -D data.master -l log/pg.master.log start sleep 4 createdb -h /tmp/waltest -p 7200 echo '####' $walmgr $tmp/wal.master.ini setup $walmgr wal.master.ini setup echo '####' $walmgr $tmp/wal.master.ini backup $walmgr wal.master.ini backup psql -c "create table t as select * from now()" -p 7200 -h /tmp/waltest echo '####' $walmgr $tmp/wal.slave.ini restore $walmgr $tmp/wal.slave.ini restore sleep 10 echo '####' $walmgr $tmp/wal.master.ini sync $walmgr wal.master.ini sync echo '####' $walmgr $tmp/wal.slave.ini boot $walmgr $tmp/wal.slave.ini boot sleep 4 psql -c 
"select * from t" -p 7201 -h /tmp/waltest pg_ctl -D data.master stop pg_ctl -D data.slave stop skytools-3.2.6/tests/londiste/0000755000000000000000000000000012426435645013275 5ustar skytools-3.2.6/tests/londiste/test-weird-merge.sh0000755000000000000000000000723312426435645017025 0ustar #! /bin/bash . ../testlib.sh ../zstop.sh v='-q' v='' nocheck=1 db_list="db1 db2 db3 db4 db5" kdb_list=`echo $db_list | sed 's/ /,/g'` #( cd ../..; make -s install ) do_check() { test $nocheck = 1 || ../zcheck.sh } title "Merge + qnode test" # create ticker conf cat > conf/pgqd.ini < conf/${xname}.ini < conf/pgqd.ini < conf/londiste_$db.ini < conf/pgqd.ini < conf/londiste_$db.ini < conf/gen$n.ini < conf/pgqd.ini < conf/londiste_$db.ini < conf/pgqd.ini < conf/londiste_$db.ini < conf/gen$n.ini < conf/pgqd.ini << EOF [pgqd] database_list = $kdb_list logfile = log/pgqd.log pidfile = pid/pgqd.pid EOF # full replicas for db in $full_list; do cat > conf/londiste_$db.ini << EOF [londiste3] job_name = londiste_$db db = dbname=$db queue_name = replika logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid EOF # part replicas for dst in $part_list; do cat > conf/londiste_${db}_${dst}.ini << EOF [londiste3] job_name = londiste_${db}_${dst} db = dbname=$dst queue_name = replika logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid EOF done done set -e msg "Create nodes for full queue" run londiste3 $v conf/londiste_full1.ini create-root root_full1 'dbname=full1' #run londiste3 $v conf/londiste_full2.ini create-branch branch_full2 'dbname=full2' --provider='dbname=full1' msg "Create nodes for replicas" for dst in $part_list; do for src in $full_list; do run londiste3 $v conf/londiste_${src}_${dst}.ini \ create-leaf leaf_${src}_${dst} "dbname=$dst" \ --provider="dbname=$src" done done #msg "Create nodes for partition root queues" #for db in $part_list; do #run londiste3 $v conf/londiste_$db.ini create-root root_$db "dbname=$db" #done msg "Tune PgQ" for db in $all_list; do run_sql $db 
"update pgq.queue set queue_ticker_idle_period='3 secs'" done msg "Launch ticker" run pgqd $v -d conf/pgqd.ini msg "Launch londiste worker" for db in $full_list; do run londiste3 $v -d conf/londiste_$db.ini worker done msg "Launch merge londiste" for src in $full_list; do for dst in $part_list; do run londiste3 $v -d conf/londiste_${src}_${dst}.ini worker done done msg "Create partconf in partition nodes" part_count=$(echo $part_list|wc -w) max_part=$(( $part_count-1 )) i=0 for db in $part_list; do run psql $db < conf/pgqd.ini << EOF [pgqd] database_list = $kdb_list logfile = log/pgqd.log pidfile = pid/pgqd.pid EOF # partition replicas for db in $part_list; do # londiste on part node cat > conf/londiste_$db.ini << EOF [londiste3] job_name = londiste_$db db = dbname=$db queue_name = replika_$db logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid EOF cat > conf/londiste_${db}_full.ini << EOF [londiste3] job_name = londiste_${db}_full db = dbname=full queue_name = replika_$db logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid EOF done for f in conf/*.ini; do echo "$f" echo "------------------" cat "$f" echo "------------------" echo done set -e msg "Create nodes for partition queues" for src in $part_list; do run londiste3 $v conf/londiste_${src}.ini create-root ${src}_root "dbname=${src}" done msg "Create merge nodes for partition queues" for src in $part_list; do run londiste3 $v conf/londiste_${src}_full.ini \ create-leaf merge_${src}_full "dbname=full" --provider="dbname=$src" done msg "Optimize pgq for testing to handle empty ticks faster" for db in $all_list; do run_sql $db "update pgq.queue set queue_ticker_idle_period='3 secs'" done msg "Launch ticker" run pgqd $v -d conf/pgqd.ini msg "Launch londiste worker" for db in $part_list; do run londiste3 $v -d conf/londiste_$db.ini worker done msg "Launch merge londiste" for src in $part_list; do run londiste3 $v -d conf/londiste_${src}_full.ini worker done msg "Create table in partition nodes" 
for db in $part_list; do run_sql "$db" "create table mydata (id int4 primary key, data text)" done msg "Register table in partition nodes" for db in $part_list; do run londiste3 $v conf/londiste_$db.ini add-table mydata done msg "Wait for cascade sync (root->leaf)" for src in $part_list; do run londiste3 $v conf/londiste_${src}_full.ini wait-root done msg "Insert few rows" for n in 1 2; do run_sql part$n "insert into mydata values ($n, 'part$n')" done msg "Create table and register it in merge nodes" run_sql full "create table mydata (id int4 primary key, data text)" run londiste3 $v conf/londiste_part1_full.ini add-table mydata --merge-all msg "Wait for replica to cach up" for src in $part_list; do run londiste3 $v conf/londiste_${src}_full.ini wait-sync done msg "Insert few rows" for n in 1 2; do run_sql part$n "insert into mydata values (2 + $n, 'part$n')" done msg "Now check if data apprered" run_sql full "select * from mydata order by id" run_sql full "select table_name, local, merge_state, table_attrs, dest_table from londiste.get_table_list('replika_part1')" run_sql full "select table_name, local, merge_state, table_attrs, dest_table from londiste.get_table_list('replika_part2')" ../zcheck.sh msg "Test EXECUTE through cascade" for db in $part_list; do run londiste3 $v conf/londiste_$db.ini execute addcol-data2.sql # do one by one to avoid deadlock on merge side when several ddl's are received simultaneously run londiste3 $v conf/londiste_${src}_full.ini wait-root done msg "Insert more rows with more columns" for n in 1 2; do run_sql part$n "insert into mydata values (4 + $n, 'part$n', 'x')" done msg "Wait for cascade sync (root->leaf)" for src in $part_list; do run londiste3 $v conf/londiste_${src}_full.ini wait-root done psql -d part1 -c 'select * from mydata order by 1;' psql -d part2 -c 'select * from mydata order by 1;' psql -d full -c 'select * from mydata order by 1;' ../zcheck.sh 
skytools-3.2.6/tests/merge_qnode/0000755000000000000000000000000012426435645013741 5ustar skytools-3.2.6/tests/merge_qnode/overview.sh0000755000000000000000000000034512426435645016150 0ustar #! /bin/sh for db in part1 full1 full2; do echo "==== $db ===" psql -d $db -c "select * from pgq.get_consumer_info() where not consumer_name like '.%'" psql -d $db -c "select * from pgq_node.local_state order by 1,2" done skytools-3.2.6/tests/merge_qnode/addcol-data2.sql0000644000000000000000000000011112426435645016672 0ustar --*-- Local-Table: mydata alter table @mydata@ add column data2 text; skytools-3.2.6/tests/merge_qnode/init.sh0000755000000000000000000000034412426435645015244 0ustar #! /bin/sh . ../env.sh lst="part1 part2 qn1 qn2 full" ../zstop.sh for db in $lst; do echo dropdb $db psql postgres -q -c "drop database if exists \"$db\";" done for db in $lst; do echo createdb $db createdb $db done skytools-3.2.6/tests/merge_qnode/regen.sh0000755000000000000000000001010312426435645015373 0ustar #! /bin/sh . 
../testlib.sh title "Merge several shards into one database" part_list="part1 part2" qn_list='qn1 qn2' full_list="full" all_list="$part_list $qn_list $full_list" kdb_list="`echo $all_list|sed 's/ /,/g'`" for db in $all_list; do cleardb $db done clearlogs msg "Create configs for pgqd and londiste processes" # create ticker conf cat > conf/pgqd.ini << EOF [pgqd] database_list = $kdb_list logfile = log/pgqd.log pidfile = pid/pgqd.pid EOF # partition replicas for n in 1 2; do # londiste on part node cat > conf/londiste_part$n.ini << EOF [londiste3] job_name = londiste_part$n db = dbname=part$n queue_name = replika_part$n logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid EOF cat > conf/londiste_part${n}_qn$n.ini << EOF [londiste3] job_name = londiste_qn$n db = dbname=qn$n queue_name = replika_part$n logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid EOF cat > conf/londiste_qn${n}_full.ini << EOF [londiste3] job_name = londiste_qn${n}_full db = dbname=full queue_name = replika_part$n logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid EOF done for f in conf/*.ini; do echo "$f" echo "------------------" cat "$f" echo "------------------" echo done set -e msg "Create both cascades root (shard) -> branch (qnode) -> leaf (merge). 
Also installs pgq and londiste into db modules" for n in 1 2; do run londiste3 $v conf/londiste_part${n}.ini create-root part${n}_root "dbname=part${n}" run londiste3 $v conf/londiste_qn${n}.ini \ create-branch qn${n} "dbname=qn${n}" --provider="dbname=part${n}" run londiste3 $v conf/londiste_qn${n}_full.ini \ create-leaf merge_qn${n}_full "dbname=full" --provider="dbname=qn${n}" done msg "Optimize pgq for testing to handle empty ticks faster" for db in $all_list; do run_sql $db "update pgq.queue set queue_ticker_idle_period='3 secs'" done msg "Launch ticker" run pgqd $v -d conf/pgqd.ini msg "Launch workers" for n in 1 2; do run londiste3 $v -d conf/londiste_part${n}.ini worker run londiste3 $v -d conf/londiste_qn${n}.ini worker run londiste3 $v -d conf/londiste_qn${n}_full.ini worker done msg "Create table in partition nodes and in target database" for db in $part_list; do run_sql "$db" "create table mydata (id int4 primary key, data text)" done run_sql full "create table mydata (id int4 primary key, data text)" msg "Register table in partition nodes" for db in $part_list; do run londiste3 $v conf/londiste_$db.ini add-table mydata done msg "Wait for cascade sync (root->leaf). 
Leaf must know that we have this table in root" for src in $qn_list; do run londiste3 $v conf/londiste_${src}_full.ini wait-root done msg "Add table into merge node" run londiste3 -q conf/londiste_qn1_full.ini add-table public.mydata --find-copy-node --merge-all msg "Insert few rows" for n in 1 2; do run_sql part$n "insert into mydata values ($n, 'part$n')" run_sql part$n "insert into mydata values (2 + $n, 'part$n')" done msg "Wait for replica to cach up" for src in $qn_list; do run londiste3 $v conf/londiste_${src}_full.ini wait-sync done msg "Now check if data apprered" run_sql full "select * from mydata order by id" run_sql full "select table_name, local, merge_state, table_attrs, dest_table from londiste.get_table_list('replika_part1')" run_sql full "select table_name, local, merge_state, table_attrs, dest_table from londiste.get_table_list('replika_part2')" ../zcheck.sh msg "Test EXECUTE through cascade" for n in 1 2; do run londiste3 $v conf/londiste_part$n.ini execute addcol-data2.sql # do one by one to avoid deadlock on merge side when several ddl's are received simultaneously run londiste3 $v conf/londiste_qn${n}_full.ini wait-root done msg "Insert more rows with more columns" for n in 1 2; do run_sql part$n "insert into mydata values (4 + $n, 'part$n', 'x')" done msg "Wait for cascade sync (root->leaf)" for src in $qn_list; do run londiste3 $v conf/londiste_${src}_full.ini wait-root done psql -d part1 -c 'select * from mydata order by 1;' psql -d part2 -c 'select * from mydata order by 1;' psql -d full -c 'select * from mydata order by 1;' ../zcheck.sh skytools-3.2.6/tests/queue_loader/0000755000000000000000000000000012426435645014126 5ustar skytools-3.2.6/tests/queue_loader/zcheck.sh0000755000000000000000000000010012426435645015723 0ustar #! 
/bin/sh grep -E 'ERR|WARN|CRIT' log/*.log || echo "All OK" skytools-3.2.6/tests/queue_loader/triggers.sql0000644000000000000000000000123212426435645016473 0ustar create trigger logger after insert or update or delete on data.simple_tbl for each row execute procedure pgq.logutriga('loaderq'); create trigger logger after insert or update or delete on data.bulk_tbl for each row execute procedure pgq.logutriga('loaderq'); create trigger logger after insert or update or delete on data.keep_all_tbl for each row execute procedure pgq.logutriga('loaderq'); create trigger logger after insert or update or delete on data.keep_latest_tbl for each row execute procedure pgq.logutriga('loaderq'); create trigger logger after insert or update or delete on data.random_tbl for each row execute procedure pgq.logutriga('loaderq'); skytools-3.2.6/tests/queue_loader/tables.sql0000644000000000000000000000134712426435645016126 0ustar set client_min_messages = 'warning'; create schema data; create table data.simple_tbl ( username text not null, contactname text not null, data text, primary key (username, contactname) ); create table data.bulk_tbl ( id serial primary key, data text ); create table data.keep_all_tbl ( id serial primary key, username text not null, tstamp timestamptz not null default now(), data text ); create table data.keep_latest_tbl ( id serial primary key, username text not null, tstamp timestamptz not null default now(), data text ); create table data.random_tbl ( id serial primary key, username text not null, tstamp timestamptz not null default now(), data text ); skytools-3.2.6/tests/queue_loader/zstop.sh0000755000000000000000000000025612426435645015647 0ustar #! /bin/sh #. 
../env.sh for p in pid/*.pid*; do test -f "$p" || continue pid=`cat "$p"` test -d "/proc/$pid" || { rm -f "$p" continue } kill "$pid" done skytools-3.2.6/tests/queue_loader/conf/0000755000000000000000000000000012426435645015053 5ustar skytools-3.2.6/tests/queue_loader/conf/setadm_loaderq.ini0000644000000000000000000000023512426435645020540 0ustar [cascade_admin] job_name = setadm_loaderq node_db = dbname=loadersrc queue_name = loaderq logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid skytools-3.2.6/tests/queue_loader/conf/loader_src.ini0000644000000000000000000000022012426435645017663 0ustar [queue_loader] job_name = loader_src db = dbname=loadersrc queue_name = loaderq logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid skytools-3.2.6/tests/queue_loader/conf/loader_dst.ini0000644000000000000000000000463512426435645017704 0ustar [queue_loader] job_name = loader_dst db = dbname=loaderdst queue_name = loaderq logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid rename_tables = [data.simple_tbl] table_mode = direct [data.bulk_tbl] table_mode = direct row_mode = bulk [data.keep_all_tbl] table_mode = split row_mode = bulk split_mode = by-date-field:tstamp ### Non-inherited partitions split_part_template = create table %%(part)s (like %%(parent)s); alter table only %%(part)s add primary key (%%(pkey)s); [data.keep_latest_tbl] table_mode = split row_mode = bulk split_mode = by-date-field:tstamp ### Inherited partitions split_part_template = create table %%(part)s () inherits (%%(parent)s); alter table only %%(part)s add primary key (%%(pkey)s); [DEFAULT] # fields - which fields to send through #fields = col1, col2, col3:renamed3 #fields = * # table_mode - how to handle a table # # ignore - ignore this table # direct - update table directly # split - split data into partitions #table_mode = ignore # split_mode - how to split, if requested # # by-batch-time: use batch time for splitting # by-event-time: use event time for splitting # 
by-date-field:fld - use fld for splitting #split_mode = by-batch-time # split_part - partition name format # # %(table_name)s %(year)s %(month)s %(day)s %(hour)s #split_part = %(table_name)s_%(year)s_%(month)s_%(day)s # split_part_template - How to create new partition tables # # Available fields: # %(part)s # %(parent)s # %(pkey)s # ### Non-inherited partitions #split_part_template = # create table %%(part)s (like %%(parent)s); # alter table only %%(part)s add primary key (%%(pkey)s); # ### Inherited partitions #split_part_template = # create table %%(part)s () inherits (%%(parent)s); # alter table only %%(part)s add primary key (%%(pkey)s); # row_mode - How to apply the events # # plain - each event creates SQL statement to run # keep_latest - change updates to DELETE + INSERT # keep_all - change updates to inserts, ignore deletes # bulk - instead of statement-per-row, do bulk updates #row_mode = plain # bulk_mode - How to do the bulk update # # correct - inserts as COPY into table, # update as COPY into temp table and single UPDATE from there # delete as COPY into temp table and single DELETE from there # delete - as 'correct', but do update as DELETE + COPY # merged - as 'delete', but merge insert rows with update rows #bulk_mode=correct skytools-3.2.6/tests/queue_loader/conf/ticker_loadersrc.ini0000644000000000000000000000021212426435645021066 0ustar [pgqadm] job_name = ticker_loadersrc db = dbname=loadersrc loop_delay = 0.5 logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid skytools-3.2.6/tests/queue_loader/init.sh0000755000000000000000000000024612426435645015432 0ustar #! /bin/sh . ../env.sh lst="loadersrc loaderdst" for db in $lst; do echo dropdb $db dropdb $db done for db in $lst; do echo createdb $db createdb $db done skytools-3.2.6/tests/queue_loader/regen.sh0000755000000000000000000000230212426435645015562 0ustar #! /bin/sh . 
../env.sh mkdir -p log pid conf ./zstop.sh v= v=-q v=-v (cd ../..; make -s python-install ) echo "" cleardb() { echo "Clearing database $1" psql -q -d $1 -c ' set client_min_messages=warning; drop schema if exists londiste cascade; drop schema if exists pgq_node cascade; drop schema if exists pgq cascade; drop schema if exists data cascade; ' } run() { echo "$ $*" "$@" } db_list="loadersrc loaderdst" for db in $db_list; do cleardb $db done echo "clean logs" rm -f log/*.log set -e run setadm $v conf/setadm_loaderq.ini create-root ldr-src 'dbname=loadersrc' --worker=loader_src run setadm $v conf/setadm_loaderq.ini create-leaf ldr-dst 'dbname=loaderdst' --worker=loader_dst --provider="dbname=loadersrc" run pgqadm $v conf/ticker_loadersrc.ini -d ticker run queue_loader $v -d conf/loader_src.ini run queue_loader $v -d conf/loader_dst.ini run psql -d loadersrc -f tables.sql run psql -d loadersrc -f triggers.sql run psql -d loaderdst -f tables.sql run psql -d loadersrc -f send.data.sql run psql -d loadersrc -f send.data.sql run psql -d loadersrc -f send.data.sql run sleep 2 run setadm $v conf/setadm_loaderq.ini status ./zcheck.sh skytools-3.2.6/tests/queue_loader/send.data.sql0000644000000000000000000000106712426435645016514 0ustar insert into data.simple_tbl (username, contactname, data) values ('randuser'||random()::text, 'randcontact'||random()::text, 'link'); /* insert into data.simple_tbl (username, contactname, data) values ('sameuser', 'samecontact', 'link'); update data.simple_tbl */ insert into data.bulk_tbl (data) values ('newdata'); insert into data.keep_all_tbl (username, data) values ('sameuser', 'newdata'); insert into data.keep_latest_tbl (username, data) values ('sameuser', 'newdata'); insert into data.random_tbl (username, data) values ('sameuser', 'newdata'); skytools-3.2.6/tests/zstop.sh0000755000000000000000000000025612426435645013175 0ustar #! /bin/sh #. 
../env.sh for p in pid/*.pid*; do test -f "$p" || continue pid=`cat "$p"` test -d "/proc/$pid" || { rm -f "$p" continue } kill "$pid" done skytools-3.2.6/tests/multimaster/0000755000000000000000000000000012426435645014022 5ustar skytools-3.2.6/tests/multimaster/init.sh0000755000000000000000000000025512426435645015326 0ustar #! /bin/sh . ../env.sh lst="src1 src2 dst" ../zstop.sh for db in $lst; do echo dropdb $db dropdb $db done for db in $lst; do echo createdb $db createdb $db done skytools-3.2.6/tests/multimaster/regen.sh0000755000000000000000000000640712426435645015470 0ustar #! /bin/bash . ../testlib.sh ../zstop.sh v='-v' # bulkloader method meth=0 src_db_list="src1 src2" dst_db_list="dst" db_list="$src_db_list $dst_db_list" kdb_list=`echo $db_list | sed 's/ /,/g'` #( cd ../..; make -s install ) echo " * create configs * " # create ticker conf cat > conf/pgqd.ini < conf/londiste_$db.ini << EOF [londiste3] job_name = londiste_$db db = dbname=$db queue_name = replika_$db logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid EOF # londiste on source to target for dst in $dst_db_list; do cat > conf/londiste_${db}_${dst}.ini << EOF [londiste3] job_name = londiste_${db}_${dst} db = dbname=$dst queue_name = replika_$db logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid EOF done done for db in $db_list; do cleardb $db done clearlogs set -e msg "Install londiste3 and initialize nodes" for db in $src_db_list; do run londiste3 $v conf/londiste_$db.ini create-root $db "dbname=$db" for dst in $dst_db_list; do run londiste3 $v conf/londiste_${db}_${dst}.ini create-leaf $dst "dbname=$dst" --provider="dbname=$db" done done for db in $db_list; do run_sql $db "update pgq.queue set queue_ticker_idle_period='5 secs'" done msg "Run ticker" run pgqd -d conf/pgqd.ini run sleep 5 msg "See topology" for db in $src_db_list; do run londiste3 $v conf/londiste_$db.ini status done msg "Run londiste3 daemon for each node" for db in $src_db_list; do run londiste3 $v -d 
conf/londiste_$db.ini worker for dst in $dst_db_list; do run londiste3 $v -d conf/londiste_${db}_${dst}.ini worker done done for db in $dst_db_list; do run createlang -d $db plpythonu run psql $db -f ../../sql/conflicthandler/merge_on_time.sql done msg "Create table on root nodes, fill couple of rows and register" for db in $src_db_list; do run_sql $db "create table mytable (id int4 primary key, data text, tstamp timestamptz default now())" for n in 1 2 3; do run_sql $db "insert into mytable values ($n, 'row$n')" done run londiste3 $v conf/londiste_$db.ini add-table mytable done sleep 10 msg "Register table on dst node with creation" #run londiste3 $v conf/londiste_src1_dst.ini add-table mytable --create --no-merge --handler=applyfn --handler-arg="func_name=merge_on_time" --handler-arg="func_conf=timefield=tstamp" run londiste3 $v conf/londiste_src1_dst.ini add-table mytable --create --handler=multimaster --handler-arg="timefield=tstamp" sleep 10 #run londiste3 $v conf/londiste_src2_dst.ini add-table mytable --expect-sync --no-merge --handler=applyfn --handler-arg="func_name=merge_on_time" --handler-arg="func_conf=timefield=tstamp" run londiste3 $v conf/londiste_src2_dst.ini add-table mytable --expect-sync --handler=multimaster --handler-arg="timefield=tstamp" for db in $src_db_list; do for n in 4 5 6; do run_sql $db "insert into mytable values ($n, 'row$n::$db')" done sleep 3 done for n in 2 3 4; do run_sql src1 "update mytable set data = 'ok', tstamp = now() where id = $n" done for n in 1 5 6; do run_sql src2 "update mytable set data = 'ok', tstamp = now() where id = $n" done run sleep 10 for dst in $dst_db_list; do run_sql $dst "select * from mytable" done ../zcheck.sh skytools-3.2.6/tests/noqueue_merge/0000755000000000000000000000000012426435645014314 5ustar skytools-3.2.6/tests/noqueue_merge/overview.sh0000755000000000000000000000034512426435645016523 0ustar #! 
/bin/sh for db in part1 full1 full2; do echo "==== $db ===" psql -d $db -c "select * from pgq.get_consumer_info() where not consumer_name like '.%'" psql -d $db -c "select * from pgq_node.local_state order by 1,2" done skytools-3.2.6/tests/noqueue_merge/init.sh0000755000000000000000000000030312426435645015612 0ustar #! /bin/sh . ../env.sh lst="part1 part2 part3 part4 full1 full2" ../zstop.sh for db in $lst; do echo dropdb $db dropdb $db done for db in $lst; do echo createdb $db createdb $db done skytools-3.2.6/tests/noqueue_merge/regen.sh0000755000000000000000000000746212426435645015764 0ustar #! /bin/sh . ../testlib.sh v=-v title "NoQueue Merge" part_list="part1 part2 part3 part4" full_list="full1 full2" pnum=0 for p in $part_list; do pnum=$(($pnum + 1)) done merge_list="" for dst in $full_list; do for src in $part_list; do merge_list="$merge_list ${src}_${dst}" done done all_list="$part_list $full_list" kdb_list="`echo $all_list|sed 's/ /,/g'`" for db in $part_list $full_list; do cleardb $db done msg "clean logs" rm -f log/*.log msg "Create configs" # create ticker conf cat > conf/pgqd.ini << EOF [pgqd] database_list = $kdb_list logfile = log/pgqd.log pidfile = pid/pgqd.pid EOF # partition replicas for db in $part_list; do queue=l3_${db}_q part_job=${queue}_${db} # londiste on part node cat > conf/$part_job.ini << EOF [londiste3] job_name = ${part_job} db = dbname=$db queue_name = ${queue} logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid EOF # londiste on combined nodes for dst in $full_list; do full_job=${queue}_$dst cat > conf/${full_job}.ini << EOF [londiste3] job_name = ${full_job} db = dbname=$dst queue_name = $queue logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid EOF done done set -e msg "Create nodes for partition queues" # partition replicas for db in $part_list; do job=l3_${db}_q_${db} run londiste3 $v conf/${job}.ini create-root ${db}_root "dbname=${db}" done msg "Create merge nodes for partition queues" for dst in $full_list; 
do for src in $part_list; do job=l3_${src}_q_${dst} run londiste3 $v conf/${job}.ini \ create-leaf merge_${src}_${dst} "dbname=$dst" \ --provider="dbname=$src" done done msg "Tune PgQ" for db in $all_list; do run_sql $db "update pgq.queue set queue_ticker_idle_period='3 secs'" done msg "Launch ticker" run pgqd $v -d conf/pgqd.ini msg "Launch londiste worker" for db in $part_list; do queue=l3_${db}_q part_job=${queue}_${db} # londiste on part node run londiste3 $v -d conf/${part_job}.ini worker # londiste on combined nodes for dst in $full_list; do full_job=${queue}_$dst run londiste3 $v -d conf/${full_job}.ini worker done done msg "Create table in partition nodes" for db in $part_list; do run_sql "$db" "create table mydata (id int4 primary key, data text)" done msg "Register table in partition nodes" for db in $part_list; do job=l3_${db}_q_${db} run londiste3 $v conf/${job}.ini add-table mydata done msg "Wait until register reaches full1" cnt=0 while test $cnt -ne $pnum; do sleep 5 cnt=`psql -A -t -d full1 -c "select count(*) from londiste.table_info where merge_state is null"` echo " cnt_tbl=$cnt" done msg "Insert few rows" n=0 for p in $part_list; do n=$(($n + 1)) run_sql $p "insert into mydata values ($n, '$p')" done msg "Create table and register it in full nodes" for db in $full_list; do job=l3_part1_q_${db} run_sql $db "select * from londiste.table_info order by queue_name" run londiste3 $v conf/$job.ini add-table mydata --create --merge-all run_sql $db "select * from londiste.table_info order by queue_name" #for src in $part_list; do # run londiste3 $v conf/l3_${src}_q_${db}.ini add-table mydata #done done msg "Wait until copy finishes on full1" cnt=0 while test $cnt -ne $pnum; do sleep 5 cnt=`psql -A -t -d full1 -c "select count(*) from londiste.table_info where merge_state = 'ok'"` echo " cnt_ok=$cnt" done msg "Insert few rows" for n in 1 2 3 4; do run_sql part$n "insert into mydata values (4 + $n, 'part$n')" done run sleep 10 msg "Now check if data 
apprered" for db in $full_list; do run_sql $db "select * from mydata order by id" run_sql $db "select * from londiste.table_info order by queue_name" for src in $part_list; do run_sql $db "select * from londiste.get_table_list('l3_${src}_q')" done done ../zcheck.sh skytools-3.2.6/tests/cascade/0000755000000000000000000000000012426435645013037 5ustar skytools-3.2.6/tests/cascade/zcheck.sh0000755000000000000000000000010012426435645014634 0ustar #! /bin/sh grep -E 'ERR|WARN|CRIT' log/*.log || echo "All OK" skytools-3.2.6/tests/cascade/plainconsumer.py0000755000000000000000000000054312426435645016275 0ustar #! /usr/bin/env python import sys, time, skytools from pgq.cascade.consumer import CascadedConsumer class PlainCascadedConsumer(CascadedConsumer): def process_remote_event(self, src_curs, dst_curs, ev): ev.tag_done() if __name__ == '__main__': script = PlainCascadedConsumer('nop_consumer', 'dst_db', sys.argv[1:]) script.start() skytools-3.2.6/tests/cascade/ztest.sh0000755000000000000000000000017512426435645014552 0ustar #! /bin/sh . ../env.sh ./plainconsumer.py -v conf/nop_consumer.ini --register ./plainconsumer.py -v conf/nop_consumer.ini skytools-3.2.6/tests/cascade/zstop.sh0000755000000000000000000000025612426435645014560 0ustar #! /bin/sh #. ../env.sh for p in pid/*.pid*; do test -f "$p" || continue pid=`cat "$p"` test -d "/proc/$pid" || { rm -f "$p" continue } kill "$pid" done skytools-3.2.6/tests/cascade/status.sh0000755000000000000000000000024412426435645014721 0ustar #! /bin/sh . 
../env.sh pgqadm conf/ticker_db1.ini status pgqadm conf/ticker_db2.ini status pgqadm conf/ticker_db3.ini status setadm -v conf/setadm.ini status skytools-3.2.6/tests/cascade/conf/0000755000000000000000000000000012426435645013764 5ustar skytools-3.2.6/tests/cascade/conf/nop_consumer.ini0000644000000000000000000000025712426435645017200 0ustar [nop_consumer] job_name = nop_consumer #_provider_db = dbname=db1 dst_db = dbname=db2 logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid queue_name = fooqueue skytools-3.2.6/tests/cascade/conf/worker_db2.ini0000644000000000000000000000022212426435645016521 0ustar [nop_worker] job_name = node2_worker dst_db = dbname=db2 logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid queue_name = fooqueue skytools-3.2.6/tests/cascade/conf/ticker_db3.ini0000644000000000000000000000017712426435645016503 0ustar [pgqadm] job_name = ticker_db3 db = dbname=db3 loop_delay = 0.5 logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid skytools-3.2.6/tests/cascade/conf/ticker_db2.ini0000644000000000000000000000017712426435645016502 0ustar [pgqadm] job_name = ticker_db2 db = dbname=db2 loop_delay = 0.5 logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid skytools-3.2.6/tests/cascade/conf/setadm.ini0000644000000000000000000000007612426435645015745 0ustar [cascade_admin] node_db = dbname=db2 queue_name = fooqueue skytools-3.2.6/tests/cascade/conf/worker_db3.ini0000644000000000000000000000022212426435645016522 0ustar [nop_worker] job_name = node3_worker dst_db = dbname=db3 logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid queue_name = fooqueue skytools-3.2.6/tests/cascade/conf/ticker_branch.ini0000644000000000000000000000047012426435645017264 0ustar [pgqadm] job_name = ticker_branch db = dbname=db_branch # how often to run maintenance [minutes] maint_delay_min = 1 # how often to check for activity [secs] loop_delay = 0.5 logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid use_skylog = 0 
connection_lifetime = 21 queue_refresh_period = 10 skytools-3.2.6/tests/cascade/conf/worker_db1.ini0000644000000000000000000000022212426435645016520 0ustar [nop_worker] job_name = node1_worker dst_db = dbname=db1 logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid queue_name = fooqueue skytools-3.2.6/tests/cascade/conf/ticker_db1.ini0000644000000000000000000000017712426435645016501 0ustar [pgqadm] job_name = ticker_db1 db = dbname=db1 loop_delay = 0.5 logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid skytools-3.2.6/tests/cascade/init.sh0000755000000000000000000000033712426435645014344 0ustar #! /bin/sh . ../env.sh mkdir -p log pid dropdb db1 dropdb db2 dropdb db3 createdb db1 createdb db2 createdb db3 pgqadm conf/ticker_db1.ini install pgqadm conf/ticker_db2.ini install pgqadm conf/ticker_db3.ini install skytools-3.2.6/tests/cascade/regen.sh0000755000000000000000000000343412426435645014502 0ustar #! /bin/sh . ../env.sh mkdir -p log pid ./zstop.sh v= v=-v v=-q cleardb() { psql -q -d $db -c ' set client_min_messages=warning; drop schema if exists londiste cascade; drop schema if exists pgq_node cascade; drop schema if exists pgq cascade; drop table if exists mydata; drop table if exists footable; drop sequence if exists footable_id_seq; ' } run() { echo "$ $*" "$@" } for db in db1 db2 db3; do pgqadm conf/ticker_$db.ini -k cleardb $db done run ./plainconsumer.py -s conf/nop_consumer.ini rm -f log/*.log set -e run cat conf/ticker_db1.ini #echo " # pgqadm install # " run pgqadm $v conf/ticker_db1.ini install run pgqadm $v conf/ticker_db2.ini install run pgqadm $v conf/ticker_db3.ini install #echo " # pgqadm ticker # " run pgqadm $v -d conf/ticker_db1.ini ticker run pgqadm $v -d conf/ticker_db2.ini ticker run pgqadm $v -d conf/ticker_db3.ini ticker #echo " # setadm create-node # " run setadm $v --worker=node1_worker conf/setadm.ini create-root node1 'dbname=db1' run setadm $v --worker=node2_worker conf/setadm.ini create-branch node2 'dbname=db2' 
--provider='dbname=db1' run setadm $v --worker=node3_worker conf/setadm.ini create-branch node3 'dbname=db3' --provider='dbname=db2' #echo " # setadm status # " run setadm $v conf/setadm.ini status #echo " # plainconsumer # " run ./plainconsumer.py $v conf/nop_consumer.ini --register --provider='dbname=db1' run ./plainconsumer.py $v -d conf/nop_consumer.ini #echo " # plainworker # " run ./plainworker.py $v -d conf/worker_db1.ini run ./plainworker.py $v -d conf/worker_db2.ini run ./plainworker.py $v -d conf/worker_db3.ini #echo " # insert_event() # " run psql db1 -c "select pgq.insert_event('fooqueue', 'tmp', 'data')" run sleep 10 grep -E 'ERR|WARN|CRIT' log/*.log || true skytools-3.2.6/tests/cascade/plainworker.py0000755000000000000000000000063712426435645015757 0ustar #! /usr/bin/env python import sys, time, skytools from pgq.cascade.worker import CascadedWorker class PlainCascadedWorker(CascadedWorker): def process_remote_event(self, src_curs, dst_curs, ev): self.log.info("got events: %s / %s" % (ev.ev_type, ev.ev_data)) ev.tag_done() if __name__ == '__main__': script = PlainCascadedWorker('nop_worker', 'dst_db', sys.argv[1:]) script.start() skytools-3.2.6/tests/rename/0000755000000000000000000000000012426435645012723 5ustar skytools-3.2.6/tests/rename/regen.sh0000755000000000000000000000575512426435645014376 0ustar #! /bin/bash . 
../testlib.sh ../zstop.sh v='-q' v='' db_list="rendb1 rendb2" kdb_list=`echo $db_list | sed 's/ /,/g'` #( cd ../..; make -s install ) do_check() { test $nocheck = 1 || ../zcheck.sh } title Rename test # create ticker conf cat > conf/pgqd.ini < conf/londiste_$db.ini < conf/pgqd.ini < conf/londiste_$db.ini < conf/pgqd.ini < conf/londiste_$db.ini <replika handler" run londiste3 $v conf/londiste_hdst.ini add-table mytable --handler=qsplitter --handler-arg="queue=replika" msg "Wait until table is in sync" cnt=0 while test $cnt -ne 1; do sleep 3 cnt=`psql -A -t -d hdst -c "select count(*) from londiste.table_info where merge_state = 'ok'"` echo " cnt=$cnt" done msg "Do some updates" run_sql hsrc "insert into mytable values (5, 'row5')" run_sql hsrc "update mytable set data = 'row5x' where id = 5" run_sql hsrc "insert into mytable values (6, 'row6')" run_sql hsrc "delete from mytable where id = 6" run_sql hsrc "insert into mytable values (7, 'row7')" run_sql hsrc "update mytable set data = 'row7x' where id = 7" run_sql hsrc "delete from mytable where id = 7" run_sql hsrc "delete from mytable where id = 1" run_sql hsrc "update mytable set data = 'row2x' where id = 2" run sleep 5 msg "Check status" run londiste3 $v conf/londiste_hsrc.ini status run sleep 5 tbl=$(psql hdst -qAtc "select * from pgq.current_event_table('replika');") msg "Check queue 'replika' form table $tbl" run_sql hdst "select * from $tbl" #run_sql hdst 'select * from mytable order by id' ../zcheck.sh skytools-3.2.6/tests/localconsumer/0000755000000000000000000000000012426435645014322 5ustar skytools-3.2.6/tests/localconsumer/init.sh0000755000000000000000000000010412426435645015617 0ustar #! /bin/sh . ../env.sh mkdir -p log pid dropdb qdb createdb qdb skytools-3.2.6/tests/localconsumer/regen.sh0000755000000000000000000000126112426435645015761 0ustar #! /bin/sh . 
../testlib.sh for db in qdb; do cleardb $db done rm -f log/*.log mkdir -p state rm -f state/* set -e title LocalConsumer test title2 Initialization msg Install PgQ run_qadmin qdb "install pgq;" run_qadmin qdb "create queue test_queue;" msg Run ticker cat_file conf/pgqd.ini <&1 } run_sql() { code_on echo "$ psql -d \"$1\" -c \"$2\"" psql -d "$1" -c "$2" 2>&1 } run_qadmin() { code_on echo "$ qadmin -d \"$1\" -c \"$2\"" qadmin -d "$1" -c "$2" 2>&1 } msg() { code_off echo "" echo "$@" echo "" } cat_file() { code_off mkdir -p `dirname $1` echo ".File: $1" case "$1" in *Makefile) echo "[source,makefile]" ;; #*.[ch]) echo "[source,c]" ;; #*.ac) echo "[source,autoconf]" ;; #*.sh) echo "[source,shell]" ;; #*.sql) echo "[source,sql]" ;; *.*) printf "[source,%s]\n" `echo $1 | sed 's/.*\.//'` ;; esac echo "-----------------------------------" sed 's/^ //' > $1 cat $1 echo "-----------------------------------" } skytools-3.2.6/tests/setadm/0000755000000000000000000000000012426435645012731 5ustar skytools-3.2.6/tests/setadm/gendb.sh0000755000000000000000000000077312426435645014356 0ustar #! /bin/sh . ../env.sh ./stop.sh dropdb zset_root dropdb zset_branch dropdb zset_leaf createdb zset_root createdb zset_branch createdb zset_leaf setadm.py conf/admin.ini init-root z-root "dbname=zset_root" setadm.py conf/admin.ini init-branch z-branch "dbname=zset_branch" --provider=z-root setadm.py conf/admin.ini init-leaf z-leaf "dbname=zset_leaf" --provider=z-branch ./testconsumer.py -v -d conf/zroot.ini ./testconsumer.py -v -d conf/zbranch.ini ./testconsumer.py -v -d conf/zleaf.ini skytools-3.2.6/tests/setadm/stop.sh0000755000000000000000000000032312426435645014253 0ustar #! /bin/sh . 
../env.sh ./testconsumer.py -s conf/zroot.ini ./testconsumer.py -s conf/zbranch.ini ./testconsumer.py -s conf/zleaf.ini #sleep 1 #pgqadm.py -s conf/ticker.ini #pgqadm.py -s conf/linkticker.ini skytools-3.2.6/tests/setadm/conf/0000755000000000000000000000000012426435645013656 5ustar skytools-3.2.6/tests/setadm/conf/zroot.ini0000644000000000000000000000023112426435645015530 0ustar [test_consumer] set_name = zset subscriber_db = dbname=zset_root job_name = root_consumer logfile = sys/log.%(job_name)s pidfile = sys/pid.%(job_name)s skytools-3.2.6/tests/setadm/conf/ticker_zleaf.ini0000644000000000000000000000017212426435645017021 0ustar [pgqadm] job_name = pgqadm_zleaf db = dbname=zset_leaf logfile = sys/log.%(job_name)s pidfile = sys/pid.%(job_name)s skytools-3.2.6/tests/setadm/conf/ticker_zroot.ini0000644000000000000000000000017212426435645017075 0ustar [pgqadm] job_name = pgqadm_zroot db = dbname=zset_root logfile = sys/log.%(job_name)s pidfile = sys/pid.%(job_name)s skytools-3.2.6/tests/setadm/conf/ticker_zbranch.ini0000644000000000000000000000017612426435645017353 0ustar [pgqadm] job_name = pgqadm_zbranch db = dbname=zset_branch logfile = sys/log.%(job_name)s pidfile = sys/pid.%(job_name)s skytools-3.2.6/tests/setadm/conf/admin.ini0000644000000000000000000000007012426435645015444 0ustar [set_admin] set_name = zset root_db = dbname=zset_root skytools-3.2.6/tests/setadm/conf/zbranch.ini0000644000000000000000000000023612426435645016007 0ustar [test_consumer] set_name = zset subscriber_db = dbname=zset_branch job_name = branch_consumer logfile = sys/log.%(job_name)s pidfile = sys/pid.%(job_name)s skytools-3.2.6/tests/setadm/conf/zleaf.ini0000644000000000000000000000023212426435645015455 0ustar [test_consumer] set_name = zset subscriber_db = dbname=zset_leaf job_name = leaf_consumer logfile = sys/log.%(job_name)s pidfile = sys/pid.%(job_name)s skytools-3.2.6/tests/setadm/testconsumer.py0000755000000000000000000000030012426435645016032 0ustar #! 
/usr/bin/env python import sys, pgq class TestConsumer(pgq.SetConsumer): pass if __name__ == '__main__': script = TestConsumer('test_consumer', sys.argv[1:]) script.start() skytools-3.2.6/tests/skylog/0000755000000000000000000000000012426435645012764 5ustar skytools-3.2.6/tests/skylog/runtest.sh0000755000000000000000000000007212426435645015026 0ustar #! /bin/sh . ../env.sh exec ./logtest.py test.ini "$@" skytools-3.2.6/tests/skylog/logtest.py0000755000000000000000000000056512426435645015030 0ustar #! /usr/bin/env python import sys, os, skytools import skytools.skylog class LogTest(skytools.DBScript): def work(self): self.log.error('test error') self.log.warning('test warning') self.log.info('test info') self.log.debug('test debug') if __name__ == '__main__': script = LogTest('log_test', sys.argv[1:]) script.start() skytools-3.2.6/tests/skylog/skylog.ini0000644000000000000000000000252212426435645014776 0ustar ; notes: ; - 'args' is mandatory in [handler_*] sections ; - in lists there must not be spaces ; ; top-level config ; ; list of all loggers [loggers] keys=root ; root logger sees everything. there can be per-job configs by ; specifing loggers with job_name of the script ; list of all handlers [handlers] keys=stderr,logdb,logsrv,logfile ; list of all formatters [formatters] keys=short,long,none ; ; map specific loggers to specifig handlers ; [logger_root] level=DEBUG handlers=stderr,logdb,logsrv,logfile ;,logfile ;logdb,logsrv,logfile ; ; configure formatters ; [formatter_short] format=%(asctime)s %(levelname)s %(message)s datefmt=%H:%M [formatter_long] format=%(asctime)s %(process)s %(levelname)s %(message)s [formatter_none] format=%(message)s ; ; configure handlers ; ; file. args: stream [handler_stderr] class=StreamHandler args=(sys.stderr,) formatter=short ; log into db. args: conn_string [handler_logdb] class=skylog.LogDBHandler args=("host=127.0.0.1 port=5432 user=marko dbname=logdb",) formatter=none level=INFO ; JSON messages over UDP. 
args: host, port [handler_logsrv] class=skylog.UdpLogServerHandler args=('127.0.0.1', 6666) formatter=none ; rotating logfile. args: filename, maxsize, maxcount [handler_logfile] class=skylog.EasyRotatingFileHandler args=('~/log/%(job_name)s.log', 100*1024*1024, 3) formatter=long skytools-3.2.6/tests/skylog/test.ini0000644000000000000000000000006112426435645014441 0ustar [log_test] loop_delay = 5 logfile = xtest.log skytools-3.2.6/tests/quoting/0000755000000000000000000000000012426435645013142 5ustar skytools-3.2.6/tests/quoting/regtest.py0000755000000000000000000000544012426435645015177 0ustar #! /usr/bin/env python import sys, time import skytools.psycopgwrapper import skytools._cquoting, skytools._pyquoting from decimal import Decimal # create a DictCursor row class fake_cursor: index = {'id': 0, 'data': 1} description = ['x', 'x'] dbrow = skytools.psycopgwrapper._CompatRow(fake_cursor()) dbrow[0] = '123' dbrow[1] = 'value' def regtest(name, func, cases): bad = 0 for dat, res in cases: res2 = func(dat) if res != res2: print("failure: %s(%s) = %s (expected %s)" % (name, repr(dat), repr(res2), repr(res))) bad += 1 if bad: print("%-20s: failed" % name) else: print("%-20s: OK" % name) sql_literal = [ [None, "null"], ["", "''"], ["a'b", "'a''b'"], [r"a\'b", r"E'a\\''b'"], [1, "'1'"], [True, "'True'"], [Decimal(1), "'1'"], ] regtest("quote_literal/c", skytools._cquoting.quote_literal, sql_literal) regtest("quote_literal/py", skytools._pyquoting.quote_literal, sql_literal) sql_copy = [ [None, "\\N"], ["", ""], ["a'\tb", "a'\\tb"], [r"a\'b", r"a\\'b"], [1, "1"], [True, "True"], [u"qwe", "qwe"], [Decimal(1), "1"], ] regtest("quote_copy/c", skytools._cquoting.quote_copy, sql_copy) regtest("quote_copy/py", skytools._pyquoting.quote_copy, sql_copy) sql_bytea_raw = [ [None, None], ["", ""], ["a'\tb", "a'\\011b"], [r"a\'b", r"a\\'b"], ["\t\344", r"\011\344"], ] regtest("quote_bytea_raw/c", skytools._cquoting.quote_bytea_raw, sql_bytea_raw) regtest("quote_bytea_raw/py", 
skytools._pyquoting.quote_bytea_raw, sql_bytea_raw) sql_ident = [ ["", ""], ["a'\t\\\"b", '"a\'\t\\""b"'], ['abc_19', 'abc_19'], ['from', '"from"'], ['0foo', '"0foo"'], ['mixCase', '"mixCase"'], ] regtest("quote_ident", skytools.quote_ident, sql_ident) t_urlenc = [ [{}, ""], [{'a': 1}, "a=1"], [{'a': None}, "a"], [{'qwe': 1, u'zz': u"qwe"}, "qwe=1&zz=qwe"], [{'a': '\000%&'}, "a=%00%25%26"], [dbrow, 'data=value&id=123'], [{'a': Decimal("1")}, "a=1"], ] regtest("db_urlencode/c", skytools._cquoting.db_urlencode, t_urlenc) regtest("db_urlencode/py", skytools._pyquoting.db_urlencode, t_urlenc) t_urldec = [ ["", {}], ["a=b&c", {'a': 'b', 'c': None}], ["&&b=f&&", {'b': 'f'}], [u"abc=qwe", {'abc': 'qwe'}], ["b=", {'b': ''}], ["b=%00%45", {'b': '\x00E'}], ] regtest("db_urldecode/c", skytools._cquoting.db_urldecode, t_urldec) regtest("db_urldecode/py", skytools._pyquoting.db_urldecode, t_urldec) t_unesc = [ ["", ""], ["\\N", "N"], ["abc", "abc"], [u"abc", "abc"], [r"\0\000\001\01\1", "\0\000\001\001\001"], [r"a\001b\tc\r\n", "a\001b\tc\r\n"], ] regtest("unescape/c", skytools._cquoting.unescape, t_unesc) regtest("unescape/py", skytools._pyquoting.unescape, t_unesc) skytools-3.2.6/.gitignore0000644000000000000000000000223712426435645012306 0ustar *~ *.pyc *.pyo *.[oas] *.so *.so.* *.diff *.log *.swp *.html *.xml .deps .DS_Store .idea config.mak config.log config.status config.guess config.sub configure install-sh autom4te.cache aclocal.m4 source.list regression.diffs regression.out results pid log dist tags zconf* .gdb* debian/*-stamp debian/*tmp* debian/packages debian/files debian/*substvars debian/*debhelper debian/patches debian/python-pgq3/* debian/python-skytools3/* debian/postgresql-*-pgq3/* debian/skytools3*/* debian/skytools-pgq*/* debian/skytools-londiste*/* debian/control-pgstamp python/skytools/installer_config.py sql/txid/txid.sql sql/pgq_node/pgq_node.sql sql/pgq_node/pgq_node.upgrade.sql sql/londiste/londiste.sql sql/londiste/londiste.upgrade.sql sql/pgq/pgq.sql 
sql/pgq/pgq.upgrade.sql sql/pgq_coop/pgq_coop.sql sql/pgq_coop/pgq_coop.upgrade.sql sql/pgq_ext/pgq_ext.sql sql/pgq_ext/pgq_ext.upgrade.sql sql/ticker/pgqd sql/txid/txid.sql.in tests/londiste/conf tests/merge/conf sql/*/*--*--*.sql sql/*/*--*.sql sql/*/test.dump sql/*/structure/newgrants*.sql sql/*/structure/oldgrants*.sql tmp_files.lst sql/ticker/pgqd.ini.h build build.sk3 doc/londiste.5 doc/man reconfigure.sh *.orig *.rej .pc skytools-3.2.6/COPYRIGHT0000644000000000000000000000143512426435645011610 0ustar SkyTools - tool collection for PostgreSQL Copyright (c) 2007 Marko Kreen, Skype Technologies OÜ Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. skytools-3.2.6/setup_pkgloader.py0000755000000000000000000000042412426435645014057 0ustar #! /usr/bin/env python from distutils.core import setup setup( name = "pkgloader", license = "ISC", version = '1.0', maintainer = "Marko Kreen", maintainer_email = "markokr@gmail.com", package_dir = {'': 'python'}, py_modules = ['pkgloader'], ) skytools-3.2.6/scripts/0000755000000000000000000000000012426435645012001 5ustar skytools-3.2.6/scripts/queue_splitter.py0000755000000000000000000000263712426435645015440 0ustar #! /usr/bin/env python """Puts events into queue specified by field from 'queue_field' config parameter. 
Config parameters:: ## Parameters for queue_splitter # database locations src_db = dbname=sourcedb_test dst_db = dbname=destdb_test # event fields from where target queue name is read #queue_field = extra1 """ import sys import pkgloader pkgloader.require('skytools', '3.0') import pgq class QueueSplitter(pgq.SerialConsumer): __doc__ = __doc__ def __init__(self, args): pgq.SerialConsumer.__init__(self, "queue_splitter3", "src_db", "dst_db", args) def process_remote_batch(self, db, batch_id, ev_list, dst_db): cache = {} queue_field = self.cf.get('queue_field', 'extra1') for ev in ev_list: row = [ev.type, ev.data, ev.extra1, ev.extra2, ev.extra3, ev.extra4, ev.time] queue = ev.__getattr__(queue_field) if queue not in cache: cache[queue] = [] cache[queue].append(row) # should match the composed row fields = ['type', 'data', 'extra1', 'extra2', 'extra3', 'extra4', 'time'] # now send them to right queues curs = dst_db.cursor() for queue, rows in cache.items(): pgq.bulk_insert_events(curs, rows, fields, queue) if __name__ == '__main__': script = QueueSplitter(sys.argv[1:]) script.start() skytools-3.2.6/scripts/data_maintainer.py0000755000000000000000000003111012426435645015472 0ustar #!/usr/bin/env python """Generic script for processing large data sets in small batches. Reads events from one datasource and commits them into another one, either one by one or in batches. 
Config template:: [data_maintainer3] job_name = dm_remove_expired_services # if source is database, you need to specify dbread and sql_get_pk_list dbread = dbname=sourcedb_test sql_get_pk_list = select username from user_service where expire_date < now(); # if source is csv file, you need to specify fileread and optionally csv_delimiter and csv_quotechar #fileread = data.csv #csv_delimiter = , #csv_quotechar = " dbwrite = dbname=destdb port=1234 host=dbhost.com user=guest password=secret dbbefore = dbname=destdb_test dbafter = dbname=destdb_test dbcrash = dbname=destdb_test dbthrottle = dbname=queuedb_test # It is a good practice to include same where condition on target side as on read side, # to ensure that you are actually changing the same data you think you are, # especially when reading from replica database or when processing takes days. sql_modify = delete from user_service where username = %%(username)s and expire_date < now(); # This will be run before executing the sql_get_pk_list query (optional) #sql_before_run = # select * from somefunction1(%(job_name)s); # This will be run when the DM finishes (optional) #sql_after_run = # select * from somefunction2(%(job_name)s); # Determines whether the sql_after_run query will be run in case the pk list query returns no rows #after_zero_rows = 1 # This will be run if the DM crashes (optional) #sql_on_crash = # select * from somefunction3(%(job_name)s); # This may be used to control throttling of the DM (optional) #sql_throttle = # select lag>'5 minutes'::interval from pgq.get_consumer_info('failoverconsumer'); # materialize query so that transaction should not be open while processing it (only used when source is a database) #with_hold = 1 # how many records process to fetch at once and if batch processing is used then # also how many records are processed in one commit #fetch_count = 100 # by default commit after each row (safe when behind plproxy, bouncer or whatever) # can be turned off for better performance 
when connected directly to database #autocommit = 1 # just for tuning to throttle how much load we let onto write database #commit_delay = 0.0 # quite often data_maintainer is run from crontab and then loop delay is not needed # in case it has to be run as daemon set loop delay in seconds #loop_delay = 1 logfile = ~/log/%(job_name)s.log pidfile = ~/pid/%(job_name)s.pid use_skylog = 0 """ import csv import datetime import os.path import sys import time import pkgloader pkgloader.require('skytools', '3.0') import skytools class DataSource (object): def __init__(self, log): self.log = log def open(self): raise NotImplementedError def close(self): raise NotImplementedError def fetch(self, count): raise NotImplementedError class DBDataSource (DataSource): def __init__(self, log, db, query, bres = None, with_hold = False): super(DBDataSource, self).__init__(log) self.db = db if with_hold: self.query = "DECLARE data_maint_cur NO SCROLL CURSOR WITH HOLD FOR %s" % query else: self.query = "DECLARE data_maint_cur NO SCROLL CURSOR FOR %s" % query self.bres = bres self.with_hold = with_hold def _run_query(self, query, params = None): self.cur.execute(query, params) self.log.debug(self.cur.query) self.log.debug(self.cur.statusmessage) def open(self): self.cur = self.db.cursor() self._run_query(self.query, self.bres) # pass results from before_query into sql_pk def close(self): self.cur.execute("CLOSE data_maint_cur") if not self.with_hold: self.db.rollback() def fetch(self, count): self._run_query("FETCH FORWARD %i FROM data_maint_cur" % count) return self.cur.fetchall() class CSVDataSource (DataSource): def __init__(self, log, filename, delimiter, quotechar): super(CSVDataSource, self).__init__(log) self.filename = filename self.delimiter = delimiter self.quotechar = quotechar def open(self): self.fp = open(self.filename, 'rb') self.reader = csv.DictReader(self.fp, delimiter = self.delimiter, quotechar = self.quotechar) def close(self): self.fp.close() def fetch(self, count): 
ret = [] for row in self.reader: ret.append(row) count -= 1 if count <= 0: break return ret class DataMaintainer (skytools.DBScript): __doc__ = __doc__ loop_delay = -1 def __init__(self, args): super(DataMaintainer, self).__init__("data_maintainer3", args) # source file self.fileread = self.cf.get("fileread", "") if self.fileread: self.fileread = os.path.expanduser(self.fileread) self.set_single_loop(True) # force single run if source is file self.csv_delimiter = self.cf.get("csv_delimiter", ',') self.csv_quotechar = self.cf.get("csv_quotechar", '"') # query for fetching the PK-s of the data set to be maintained self.sql_pk = self.cf.get("sql_get_pk_list", "") if (int(bool(self.sql_pk)) + int(bool(self.fileread))) in (0,2): raise skytools.UsageError("Either fileread or sql_get_pk_list must be specified in the configuration file") # query for changing data tuple ( autocommit ) self.sql_modify = self.cf.get("sql_modify") # query to be run before starting the data maintainer, # useful for retrieving initialization parameters of the query self.sql_before = self.cf.get("sql_before_run", "") # query to be run after finishing the data maintainer self.sql_after = self.cf.get("sql_after_run", "") # whether to run the sql_after query in case of 0 rows self.after_zero_rows = self.cf.getint("after_zero_rows", 1) # query to be run if the process crashes self.sql_crash = self.cf.get("sql_on_crash", "") # query for checking if / how much to throttle self.sql_throttle = self.cf.get("sql_throttle", "") # how many records to fetch at once self.fetchcnt = self.cf.getint("fetchcnt", 100) self.fetchcnt = self.cf.getint("fetch_count", self.fetchcnt) # specifies if non-transactional cursor should be created (0 -> without hold) self.withhold = self.cf.getint("with_hold", 1) # execution mode (0 -> whole batch is committed / 1 -> autocommit) self.autocommit = self.cf.getint("autocommit", 1) # delay in seconds after each commit self.commit_delay = self.cf.getfloat("commit_delay", 0.0) def 
work(self): self.log.info('Starting..') self.started = self.lap_time = time.time() self.total_count = 0 bres = {} if self.sql_before: bdb = self.get_database("dbbefore", autocommit=1) bcur = bdb.cursor() bcur.execute(self.sql_before) if bcur.statusmessage.startswith('SELECT'): res = bcur.fetchall() assert len(res)==1, "Result of a 'before' query must be 1 row" bres = res[0].copy() if self.sql_throttle: dbt = self.get_database("dbthrottle", autocommit=1) tcur = dbt.cursor() if self.autocommit: self.log.info("Autocommit after each modify") dbw = self.get_database("dbwrite", autocommit=1) else: self.log.info("Commit in %i record batches", self.fetchcnt) dbw = self.get_database("dbwrite", autocommit=0) if self.fileread: self.datasource = CSVDataSource(self.log, self.fileread, self.csv_delimiter, self.csv_quotechar) else: if self.withhold: dbr = self.get_database("dbread", autocommit=1) else: dbr = self.get_database("dbread", autocommit=0) self.datasource = DBDataSource(self.log, dbr, self.sql_pk, bres, self.withhold) self.datasource.open() mcur = dbw.cursor() while True: # loop while fetch returns fetch_count rows self.fetch_started = time.time() res = self.datasource.fetch(self.fetchcnt) count, lastitem = self.process_batch(res, mcur, bres) self.total_count += count if not self.autocommit: dbw.commit() self.stat_put("duration", time.time() - self.fetch_started) self.send_stats() if len(res) < self.fetchcnt or self.last_sigint: break if self.commit_delay > 0.0: time.sleep(self.commit_delay) if self.sql_throttle: self.throttle(tcur) self._print_count("--- Running count: %s duration: %s ---") if self.last_sigint: self.log.info("Exiting on user request") self.datasource.close() self.log.info("--- Total count: %s duration: %s ---", self.total_count, datetime.timedelta(0, round(time.time() - self.started))) if self.sql_after and (self.after_zero_rows > 0 or self.total_count > 0): adb = self.get_database("dbafter", autocommit=1) acur = adb.cursor() 
acur.execute(self.sql_after, lastitem) def process_batch(self, res, mcur, bres): """ Process events in autocommit mode reading results back and trying to make some sense out of them """ try: count = 0 item = bres.copy() for i in res: # for each row in read query result item.update(i) mcur.execute(self.sql_modify, item) self.log.debug(mcur.query) if mcur.statusmessage.startswith('SELECT'): # if select was used we can expect some result mres = mcur.fetchall() for r in mres: if 'stats' in r: # if specially handled column 'stats' is present for k, v in skytools.db_urldecode(r['stats'] or '').items(): self.stat_increase(k, int(v)) self.log.debug(r) else: self.stat_increase('processed', mcur.rowcount) self.log.debug(mcur.statusmessage) if 'cnt' in item: count += item['cnt'] self.stat_increase("count", item['cnt']) else: count += 1 self.stat_increase("count") if self.last_sigint: break return count, item except: # process has crashed, run sql_crash and re-raise the exception if self.sql_crash: dbc = self.get_database("dbcrash", autocommit=1) ccur = dbc.cursor() ccur.execute(self.sql_crash, item) raise def throttle(self, tcur): while not self.last_sigint: tcur.execute(self.sql_throttle) _r = tcur.fetchall() assert len(_r) == 1 and len(_r[0]) == 1, "Result of 'throttle' query must be 1 value" throttle = _r[0][0] if isinstance(throttle, bool): tt = float(throttle and 30) elif isinstance(throttle, (int, float)): tt = float(throttle) else: self.log.warn("Result of 'throttle' query must be boolean or numeric") break if tt > 0.0: self.log.debug("sleeping %f s", tt) time.sleep(tt) else: break self._print_count("--- Waiting count: %s duration: %s ---") def _print_count(self, text): if time.time() - self.lap_time > 60.0: # if one minute has passed print running totals self.log.info(text, self.total_count, datetime.timedelta(0, round(time.time() - self.started))) self.lap_time = time.time() def shutdown(self): super(DataMaintainer, self).shutdown() self.log.info("Script finished, 
exiting") if __name__ == '__main__': script = DataMaintainer(sys.argv[1:]) script.start() skytools-3.2.6/scripts/scriptmgr.py0000755000000000000000000002647612426435645014407 0ustar #! /usr/bin/env python """Bulk start/stop of scripts. Reads a bunch of config files and maps them to scripts, then handles those. Config template: [scriptmgr] job_name = scriptmgr_cphdb5 config_list = ~/random/conf/*.ini logfile = ~/log/%(job_name)s.log pidfile = ~/pid/%(job_name)s.pid #use_skylog = 1 # defaults for services [DEFAULT] cwd = ~/ args = -v # service descriptions [cube_dispatcher] script = cube_dispatcher.py [table_dispatcher] script = table_dispatcher.py [bulk_loader] script = bulk_loader.py [londiste] script = londiste.py args = replay [pgqadm] script = pgqadm.py args = ticker # services to be ignored [log_checker] disabled = 1 """ import sys, os, signal, glob, ConfigParser, time import pkgloader pkgloader.require('skytools', '3.0') import skytools try: import pwd except ImportError: pwd = None command_usage = """ %prog [options] INI CMD [subcmd args] Commands: start -a | -t=service | jobname [...] start job(s) stop -a | -t=service | jobname [...] stop job(s) restart -a | -t=service | jobname [...] restart job(s) reload -a | -t=service | jobname [...] send reload signal status [-a | -t=service | jobname ...] """ def job_sort_cmp(j1, j2): d1 = j1['service'] + j1['job_name'] d2 = j2['service'] + j2['job_name'] if d1 < d2: return -1 elif d1 > d2: return 1 else: return 0 def launch_cmd(job, cmd): if job['user']: cmd = 'sudo -nH -u "%s" %s' % (job['user'], cmd) return os.system(cmd) def full_path(job, fn): """Like os.path.expanduser() but works for other users. 
""" if not fn: return fn if fn[0] == '~': if fn.find('/') > 0: user, rest = fn.split('/',1) else: user = fn rest = '' user = user[1:] if not user: user = job['user'] # find home if user: home = pwd.getpwnam(user).pw_dir elif 'HOME' in os.environ: home = os.environ['HOME'] else: home = os.pwd.getpwuid(os.getuid()).pw_dir if rest: return os.path.join(home, rest) else: return home # always return full path return os.path.join(job['cwd'], fn) class ScriptMgr(skytools.DBScript): __doc__ = __doc__ svc_list = [] svc_map = {} config_list = [] job_map = {} job_list = [] def init_optparse(self, p = None): p = skytools.DBScript.init_optparse(self, p) p.add_option("-a", "--all", action="store_true", help="apply command to all jobs") p.add_option("-t", "--type", action="store", metavar="SVC", help="apply command to all jobs of this service type") p.add_option("-w", "--wait", action="store_true", help="wait for job(s) after signaling") p.set_usage(command_usage.strip()) return p def load_jobs(self): self.svc_list = [] self.svc_map = {} self.config_list = [] # load services svc_list = self.cf.sections() svc_list.remove(self.service_name) with_user = 0 without_user = 0 for svc_name in svc_list: cf = self.cf.clone(svc_name) disabled = cf.getboolean('disabled', 0) defscript = None if disabled: defscript = '/disabled' svc = { 'service': svc_name, 'script': cf.getfile('script', defscript), 'cwd': cf.getfile('cwd'), 'disabled': disabled, 'args': cf.get('args', ''), 'user': cf.get('user', ''), } if svc['user']: with_user += 1 else: without_user += 1 self.svc_list.append(svc) self.svc_map[svc_name] = svc if with_user and without_user: raise skytools.UsageError("Invalid config - some jobs have user=, some don't") # generate config list for tmp in self.cf.getlist('config_list'): tmp = os.path.expanduser(tmp) tmp = os.path.expandvars(tmp) for fn in glob.glob(tmp): self.config_list.append(fn) # read jobs for fn in self.config_list: raw = ConfigParser.SafeConfigParser({'job_name':'?', 
'service_name':'?'}) raw.read(fn) # skip its own config if raw.has_section(self.service_name): continue got = 0 for sect in raw.sections(): if sect in self.svc_map: got = 1 self.add_job(fn, sect) if not got: self.log.warning('Cannot find service for %s', fn) def add_job(self, cf_file, service_name): svc = self.svc_map[service_name] cf = skytools.Config(service_name, cf_file) disabled = svc['disabled'] if not disabled: disabled = cf.getboolean('disabled', 0) job = { 'disabled': disabled, 'config': cf_file, 'cwd': svc['cwd'], 'script': svc['script'], 'args': svc['args'], 'user': svc['user'], 'service': svc['service'], 'job_name': cf.get('job_name'), 'pidfile': cf.get('pidfile', ''), } if job['pidfile']: job['pidfile'] = full_path(job, job['pidfile']) self.job_list.append(job) self.job_map[job['job_name']] = job def cmd_status (self, jobs): for jn in jobs: try: job = self.job_map[jn] except KeyError: self.log.error ("Unknown job: %s", jn) continue pidfile = job['pidfile'] name = job['job_name'] svc = job['service'] if job['disabled']: name += " (disabled)" if not pidfile: print(" pidfile? 
[%s] %s" % (svc, name)) elif os.path.isfile(pidfile): print(" OK [%s] %s" % (svc, name)) else: print(" STOPPED [%s] %s" % (svc, name)) def cmd_info (self, jobs): for jn in jobs: try: job = self.job_map[jn] except KeyError: self.log.error ("Unknown job: %s", jn) continue print(job) def cmd_start(self, job_name): job = self.get_job_by_name (job_name) if isinstance (job, int): return job # ret.code self.log.info('Starting %s', job_name) pidfile = job['pidfile'] if not pidfile: self.log.warning("No pidfile for %s, cannot launch", job_name) return 0 if os.path.isfile(pidfile): if skytools.signal_pidfile(pidfile, 0): self.log.warning("Script %s seems running", job_name) return 0 else: self.log.info("Ignoring stale pidfile for %s", job_name) os.chdir(job['cwd']) cmd = "%(script)s %(config)s %(args)s -d" % job res = launch_cmd(job, cmd) self.log.debug(res) if res != 0: self.log.error('startup failed: %s', job_name) return 1 else: return 0 def cmd_stop(self, job_name): job = self.get_job_by_name (job_name) if isinstance (job, int): return job # ret.code self.log.info('Stopping %s', job_name) self.signal_job(job, signal.SIGINT) def cmd_reload(self, job_name): job = self.get_job_by_name (job_name) if isinstance (job, int): return job # ret.code self.log.info('Reloading %s', job_name) self.signal_job(job, signal.SIGHUP) def get_job_by_name (self, job_name): if job_name not in self.job_map: self.log.error ("Unknown job: %s", job_name) return 1 job = self.job_map[job_name] if job['disabled']: self.log.info ("Skipping %s", job_name) return 0 return job def wait_for_stop (self, job_name): job = self.get_job_by_name (job_name) if isinstance (job, int): return job # ret.code msg = False while True: if skytools.signal_pidfile (job['pidfile'], 0): if not msg: self.log.info ("Waiting for %s to stop", job_name) msg = True time.sleep (0.1) else: return 0 def signal_job(self, job, sig): pidfile = job['pidfile'] if not pidfile: self.log.warning("No pidfile for %s (%s)", job['job_name'], 
job['config']) return if os.path.isfile(pidfile): pid = int(open(pidfile).read()) if job['user']: # run sudo + kill to avoid killing unrelated processes res = os.system("sudo -u %s kill %d" % (job['user'], pid)) if res: self.log.warning("Signaling %s failed", job['job_name']) else: # direct kill try: os.kill(pid, sig) except Exception, det: self.log.warning("Signaling %s failed: %s", job['job_name'], det) else: self.log.warning("Job %s not running", job['job_name']) def work(self): self.set_single_loop(1) self.job_list = [] self.job_map = {} self.load_jobs() self.job_list.sort(job_sort_cmp) if len(self.args) < 2: print("need command") sys.exit(1) cmd = self.args[1] jobs = self.args[2:] if cmd in ["status", "info"] and len(jobs) == 0 and not self.options.type: self.options.all = True if len(jobs) == 0 and self.options.all: for job in self.job_list: jobs.append(job['job_name']) if len(jobs) == 0 and self.options.type: for job in self.job_list: if job['service'] == self.options.type: jobs.append(job['job_name']) if cmd == "status": self.cmd_status(jobs) return elif cmd == "info": self.cmd_info(jobs) return if len(jobs) == 0: print("no jobs given?") sys.exit(1) if cmd == "start": err = 0 for n in jobs: err += self.cmd_start(n) if err > 0: self.log.error('some scripts failed') sys.exit(1) elif cmd == "stop": for n in jobs: self.cmd_stop(n) if self.options.wait: for n in jobs: self.wait_for_stop(n) elif cmd == "restart": for n in jobs: self.cmd_stop(n) if self.options.wait: for n in jobs: self.wait_for_stop(n) else: time.sleep(2) for n in jobs: self.cmd_start(n) elif cmd == "reload": for n in jobs: self.cmd_reload(n) else: print("unknown command: " + cmd) sys.exit(1) if __name__ == '__main__': script = ScriptMgr('scriptmgr', sys.argv[1:]) script.start() skytools-3.2.6/scripts/simple_local_consumer.py0000755000000000000000000000403212426435645016733 0ustar #!/usr/bin/env python """Consumer that simply calls SQL query for each event. 
It tracks completed batches in local file. Config:: # source database src_db = # destination database dst_db = # query to call dst_query = select * from somefunc(%%(pgq.ev_data)s); ## Use table_filter where possible instead of this ## # filter for events (SQL fragment) consumer_filter = ev_extra1 = 'public.mytable1' """ import sys import psycopg2 import pkgloader pkgloader.require('skytools', '3.0') import pgq import skytools class SimpleLocalConsumer(pgq.LocalConsumer): __doc__ = __doc__ def reload(self): super(SimpleLocalConsumer, self).reload() self.dst_query = self.cf.get("dst_query") if self.cf.get("consumer_filter", ""): self.consumer_filter = self.cf.get("consumer_filter", "") def process_local_event(self, db, batch_id, ev): if ev.ev_type[:2] not in ('I:', 'U:', 'D:'): return if ev.ev_data is None: payload = {} else: payload = skytools.db_urldecode(ev.ev_data) payload['pgq.tick_id'] = self.batch_info['cur_tick_id'] payload['pgq.ev_id'] = ev.ev_id payload['pgq.ev_time'] = ev.ev_time payload['pgq.ev_type'] = ev.ev_type payload['pgq.ev_data'] = ev.ev_data payload['pgq.ev_extra1'] = ev.ev_extra1 payload['pgq.ev_extra2'] = ev.ev_extra2 payload['pgq.ev_extra3'] = ev.ev_extra3 payload['pgq.ev_extra4'] = ev.ev_extra4 self.log.debug(self.dst_query, payload) retries, curs = self.execute_with_retry('dst_db', self.dst_query, payload, exceptions = (psycopg2.OperationalError,)) if curs.statusmessage[:6] == 'SELECT': res = curs.fetchall() self.log.debug(res) else: self.log.debug(curs.statusmessage) if __name__ == '__main__': script = SimpleLocalConsumer("simple_local_consumer3", "src_db", sys.argv[1:]) script.start() skytools-3.2.6/scripts/find_sql_functions.py0000755000000000000000000000425412426435645016252 0ustar #! /usr/bin/env python """Find and print out function signatures from .sql file. Usage: find_sql_functions.py [-h] [-s] [-p PREFIX] FILE ... 
Switches: -h Show help -p PREFIX Prefix each line with string -s Check whether function is SECURITY DEFINER """ import sys, re, getopt rx = r""" ^ create \s+ (?: or \s+ replace \s+ )? function ( [^(]+ ) [(] ( [^)]* ) [)] """ rx_secdef = r"""security\s+definer""" rc = re.compile(rx, re.I | re.M | re.X) sc = re.compile(r"\s+") rc_sec = re.compile(rx_secdef, re.I | re.X) def grep_file(fn, cf_prefix, cf_secdef): sql = open(fn).read() pos = 0 while 1: m = rc.search(sql, pos) if not m: break pos = m.end() m2 = rc.search(sql, pos) if m2: xpos = m2.end() else: xpos = len(sql) secdef = False m2 = rc_sec.search(sql, pos, xpos) if m2: secdef = True fname = m.group(1).strip() fargs = m.group(2) alist = fargs.split(',') tlist = [] for a in alist: a = a.strip() toks = sc.split(a.lower()) if toks[0] == "out": continue if toks[0] in ("in", "inout"): toks = toks[1:] # just take last item tlist.append(toks[-1]) sig = "%s(%s)" % (fname, ", ".join(tlist)) if cf_prefix: ln = "%s %s;" % (cf_prefix, sig) else: ln = " %s(%s)," % (fname, ", ".join(tlist)) if cf_secdef and secdef: ln = "%-72s -- SECDEF" % (ln) print ln def main(argv): cf_secdef = 0 cf_prefix = '' try: opts, args = getopt.getopt(argv, "hsp:") except getopt.error, d: print 'getopt:', d sys.exit(1) for o, a in opts: if o == '-h': print __doc__ sys.exit(0) elif o == '-s': cf_secdef = 1 elif o == '-p': cf_prefix = a else: print __doc__ sys.exit(1) for fn in args: grep_file(fn, cf_prefix, cf_secdef) if __name__ == '__main__': main(sys.argv[1:]) skytools-3.2.6/scripts/skytools_upgrade.py0000755000000000000000000001416612426435645015764 0ustar #! /usr/bin/env python """Upgrade script for versioned schemas.""" usage = """ %prog [--user=U] [--host=H] [--port=P] --all %prog [--user=U] [--host=H] [--port=P] DB1 [ DB2 ... 
]\ """ import sys, os, re, optparse import pkgloader pkgloader.require('skytools', '3.0') import skytools from skytools.natsort import natsort_key # schemas, where .upgrade.sql is enough AUTO_UPGRADE = ('pgq', 'pgq_node', 'pgq_coop', 'londiste', 'pgq_ext') # fetch list of databases DB_LIST = "select datname from pg_database "\ " where not datistemplate and datallowconn "\ " order by 1" # dont support upgrade from 2.x (yet?) version_list = [ # schema, ver, filename, recheck_func ['pgq', '3.0', None, None], ['londiste', '3.0', None, None], ['pgq_ext', '2.1', None, None], ] def is_version_ge(a, b): """Return True if a is greater or equal than b.""" va = natsort_key(a) vb = natsort_key(b) return va >= vb def is_version_gt(a, b): """Return True if a is greater than b.""" va = natsort_key(a) vb = natsort_key(b) return va > vb def check_version(curs, schema, new_ver_str, recheck_func=None, force_gt=False): funcname = "%s.version" % schema if not skytools.exists_function(curs, funcname, 0): if recheck_func is not None: return recheck_func(curs), 'NULL' else: return 0, 'NULL' q = "select %s()" % funcname curs.execute(q) old_ver_str = curs.fetchone()[0] if force_gt: ok = is_version_gt(old_ver_str, new_ver_str) else: ok = is_version_ge(old_ver_str, new_ver_str) return ok, old_ver_str class DbUpgrade(skytools.DBScript): """Upgrade all Skytools schemas in Postgres cluster.""" def upgrade(self, dbname, db): """Upgrade all schemas in single db.""" curs = db.cursor() ignore = {} for schema, ver, fn, recheck_func in version_list: # skip schema? if schema in ignore: continue if not skytools.exists_schema(curs, schema): ignore[schema] = 1 continue # new enough? 
ok, oldver = check_version(curs, schema, ver, recheck_func, self.options.force) if ok: continue # too old schema, no way to upgrade if fn is None: self.log.info('%s: Cannot upgrade %s, too old version', dbname, schema) ignore[schema] = 1 continue if self.options.not_really: self.log.info ("%s: Would upgrade '%s' version %s to %s", dbname, schema, oldver, ver) continue curs = db.cursor() curs.execute('begin') self.log.info("%s: Upgrading '%s' version %s to %s", dbname, schema, oldver, ver) skytools.installer_apply_file(db, fn, self.log) curs.execute('commit') def work(self): """Loop over databases.""" self.set_single_loop(1) self.load_cur_versions() # loop over all dbs dblst = self.args if self.options.all: db = self.connect_db('postgres') curs = db.cursor() curs.execute(DB_LIST) dblst = [] for row in curs.fetchall(): dblst.append(row[0]) self.close_database('db') elif not dblst: raise skytools.UsageError('Give --all or list of database names on command line') # loop over connstrs for dbname in dblst: if self.last_sigint: break self.log.info("%s: connecting", dbname) db = self.connect_db(dbname) self.upgrade(dbname, db) self.close_database('db') def load_cur_versions(self): """Load current version numbers from .upgrade.sql files.""" vrc = re.compile(r"^ \s+ return \s+ '([0-9.]+)';", re.X | re.I | re.M) for s in AUTO_UPGRADE: fn = '%s.upgrade.sql' % s fqfn = skytools.installer_find_file(fn) try: f = open(fqfn, 'r') except IOError, d: raise skytools.UsageError('%s: cannot find upgrade file: %s [%s]' % (s, fqfn, str(d))) sql = f.read() f.close() m = vrc.search(sql) if not m: raise skytools.UsageError('%s: failed to detect version' % fqfn) ver = m.group(1) cur = [s, ver, fn, None] self.log.info("Loaded %s %s from %s", s, ver, fqfn) version_list.append(cur) def connect_db(self, dbname): """Create connect string, then connect.""" elems = ["dbname='%s'" % dbname] if self.options.host: elems.append("host='%s'" % self.options.host) if self.options.port: 
elems.append("port='%s'" % self.options.port) if self.options.user: elems.append("user='%s'" % self.options.user) cstr = ' '.join(elems) return self.get_database('db', connstr = cstr, autocommit = 1) def init_optparse(self, parser=None): """Setup command-line flags.""" p = skytools.DBScript.init_optparse(self, parser) p.set_usage(usage) g = optparse.OptionGroup(p, "options for skytools_upgrade") g.add_option("--all", action="store_true", help = 'upgrade all databases') g.add_option("--not-really", action = "store_true", dest = "not_really", default = False, help = "don't actually do anything") g.add_option("--user", help = 'username to use') g.add_option("--host", help = 'hostname to use') g.add_option("--port", help = 'port to use') g.add_option("--force", action = "store_true", help = 'upgrade even if schema versions are new enough') p.add_option_group(g) return p def load_config(self): """Disable config file.""" return skytools.Config(self.service_name, None, user_defs = {'use_skylog': '0', 'job_name': 'db_upgrade'}) if __name__ == '__main__': script = DbUpgrade('skytools_upgrade', sys.argv[1:]) script.start() skytools-3.2.6/scripts/catsql.py0000755000000000000000000000666712426435645013664 0ustar #! /usr/bin/env python """Prints out SQL files with psql command execution. Supported psql commands: \i, \cd, \q Others are skipped. Aditionally does some pre-processing for NDoc. NDoc is looks nice but needs some hand-holding. Bug: - function def end detection searches for 'as'/'is' but does not check word boundaries - finds them even in function name. That means in main conf, as/is must be disabled and $ ' added. This script can remove the unnecessary AS from output. Niceties: - Ndoc includes function def in output only if def is after comment. But for SQL functions its better to have it after def. This script can swap comment and def. - Optionally remove CREATE FUNCTION (OR REPLACE) from def to keep it shorter in doc. 
Note: - NDoc compares real function name and name in comment. if differ, it decides detection failed. """ import sys, os, re, getopt def usage(x): print("usage: catsql [--ndoc] FILE [FILE ...]") sys.exit(x) # NDoc specific changes cf_ndoc = 0 # compile regexes func_re = r"create\s+(or\s+replace\s+)?function\s+" func_rc = re.compile(func_re, re.I) comm_rc = re.compile(r"^\s*([#]\s*)?(?P--.*)", re.I) end_rc = re.compile(r"\b([;]|begin|declare|end)\b", re.I) as_rc = re.compile(r"\s+as\s+", re.I) cmd_rc = re.compile(r"^\\([a-z]*)(\s+.*)?", re.I) # conversion func def fix_func(ln): # if ndoc, replace AS with ' ' if cf_ndoc: return as_rc.sub(' ', ln) else: return ln # got function def def proc_func(f, ln): # remove CREATE OR REPLACE if cf_ndoc: ln = func_rc.sub('', ln) ln = fix_func(ln) pre_list = [ln] comm_list = [] while 1: ln = f.readline() if not ln: break com = None if cf_ndoc: com = comm_rc.search(ln) if cf_ndoc and com: pos = com.start('com') comm_list.append(ln[pos:]) elif end_rc.search(ln): break elif len(comm_list) > 0: break else: pre_list.append(fix_func(ln)) if len(comm_list) > 2: map(sys.stdout.write, comm_list) map(sys.stdout.write, pre_list) else: map(sys.stdout.write, pre_list) map(sys.stdout.write, comm_list) if ln: sys.stdout.write(fix_func(ln)) def cat_file(fn): sys.stdout.write("\n") f = open(fn) while 1: ln = f.readline() if not ln: break m = cmd_rc.search(ln) if m: cmd = m.group(1) if cmd == "i": # include a file fn2 = m.group(2).strip() cat_file(fn2) elif cmd == "q": # quit sys.exit(0) elif cmd == "cd": # chdir cd_dir = m.group(2).strip() os.chdir(cd_dir) else: # skip all others pass else: if func_rc.search(ln): # function header proc_func(f, ln) else: # normal sql sys.stdout.write(ln) sys.stdout.write("\n") def main(): global cf_ndoc try: opts, args = getopt.gnu_getopt(sys.argv[1:], 'h', ['ndoc']) except getopt.error, d: print(str(d)) usage(1) for o, v in opts: if o == "-h": usage(0) elif o == "--ndoc": cf_ndoc = 1 for fn in args: cat_file(fn) if 
__name__ == '__main__': main() skytools-3.2.6/scripts/queue_loader.py0000755000000000000000000004770712426435645015047 0ustar #! /usr/bin/env python """Load data from queue into tables, with optional partitioning. Config template:: [queue_loader] job_name = logfile = pidfile = db = #rename_tables = [DEFAULT] # fields - which fields to send through #fields = col1, col2, col3:renamed3 #fields = * # table_mode - how to handle a table # # ignore - ignore this table # direct - update table directly # split - split data into partitions #table_mode = ignore # split_mode - how to split, if requested # # by-batch-time: use batch time for splitting # by-event-time: use event time for splitting # by-date-field:fld - use fld for splitting #split_mode = by-batch-time # split_part - partition name format # # %(table_name)s %(year)s %(month)s %(day)s %(hour)s #split_part = %(table_name)s_%(year)s_%(month)s_%(day)s # split_part_template - How to create new partition tables # # Available fields: # %(part)s # %(parent)s # %(pkey)s # ### Non-inherited partitions #split_part_template = # create table %%(part)s (like %%(parent)s); # alter table only %%(part)s add primary key (%%(pkey)s); # ### Inherited partitions #split_part_template = # create table %%(part)s () inherits (%%(parent)s); # alter table only %%(part)s add primary key (%%(pkey)s); # row_mode - How to apply the events # # plain - each event creates SQL statement to run # keep_latest - change updates to DELETE + INSERT # keep_all - change updates to inserts, ignore deletes # bulk - instead of statement-per-row, do bulk updates #row_mode = plain # bulk_mode - How to do the bulk update # # correct - inserts as COPY into table, # update as COPY into temp table and single UPDATE from there # delete as COPY into temp table and single DELETE from there # delete - as 'correct', but do update as DELETE + COPY # merged - as 'delete', but merge insert rows with update rows #bulk_mode=correct [table public.foo] mode = create_sql = """ 
import sys, time import pkgloader pkgloader.require('skytools', '3.0') import skytools from pgq.cascade.worker import CascadedWorker from skytools import quote_ident, quote_fqident, UsageError # TODO: auto table detect # BulkLoader load method METH_CORRECT = 0 METH_DELETE = 1 METH_MERGED = 2 LOAD_METHOD = METH_CORRECT # BulkLoader hacks AVOID_BIZGRES_BUG = 0 USE_LONGLIVED_TEMP_TABLES = True class BasicLoader: """Apply events as-is.""" def __init__(self, table_name, parent_name, log): self.table_name = table_name self.parent_name = parent_name self.sql_list = [] self.log = log def add_row(self, op, data, pkey_list): if op == 'I': sql = skytools.mk_insert_sql(data, self.table_name, pkey_list) elif op == 'U': sql = skytools.mk_update_sql(data, self.table_name, pkey_list) elif op == 'D': sql = skytools.mk_delete_sql(data, self.table_name, pkey_list) else: raise Exception('bad operation: '+op) self.sql_list.append(sql) def flush(self, curs): if len(self.sql_list) > 0: curs.execute("\n".join(self.sql_list)) self.sql_list = [] class KeepLatestLoader(BasicLoader): """Keep latest row version. Updates are changed to delete + insert, deletes are ignored. Makes sense only for partitioned tables. """ def add_row(self, op, data, pkey_list): if op == 'U': BasicLoader.add_row(self, 'D', data, pkey_list) BasicLoader.add_row(self, 'I', data, pkey_list) elif op == 'I': BasicLoader.add_row(self, 'I', data, pkey_list) else: pass class KeepAllLoader(BasicLoader): """Keep all row versions. Updates are changed to inserts, deletes are ignored. Makes sense only for partitioned tables. 
""" def add_row(self, op, data, pkey_list): if op == 'U': op = 'I' elif op == 'D': return BasicLoader.add_row(self, op, data, pkey_list) class BulkEvent(object): """Helper class for BulkLoader to store relevant data.""" __slots__ = ('op', 'data', 'pk_data') def __init__(self, op, data, pk_data): self.op = op self.data = data self.pk_data = pk_data class BulkLoader(BasicLoader): """Instead of statement-per event, load all data with one big COPY, UPDATE or DELETE statement. """ fake_seq = 0 def __init__(self, table_name, parent_name, log): """Init per-batch table data cache.""" BasicLoader.__init__(self, table_name, parent_name, log) self.pkey_list = None self.dist_fields = None self.col_list = None self.ev_list = [] self.pkey_ev_map = {} def reset(self): self.ev_list = [] self.pkey_ev_map = {} def add_row(self, op, data, pkey_list): """Store new event.""" # get pkey value if self.pkey_list is None: self.pkey_list = pkey_list if len(self.pkey_list) > 0: pk_data = (data[k] for k in self.pkey_list) elif op == 'I': # fake pkey, just to get them spread out pk_data = self.fake_seq self.fake_seq += 1 else: raise Exception('non-pk tables not supported: %s' % self.table_name) # get full column list, detect added columns if not self.col_list: self.col_list = data.keys() elif self.col_list != data.keys(): # ^ supposedly python guarantees same order in keys() self.col_list = data.keys() # add to list ev = BulkEvent(op, data, pk_data) self.ev_list.append(ev) # keep all versions of row data if ev.pk_data in self.pkey_ev_map: self.pkey_ev_map[ev.pk_data].append(ev) else: self.pkey_ev_map[ev.pk_data] = [ev] def prepare_data(self): """Got all data, prepare for insertion.""" del_list = [] ins_list = [] upd_list = [] for ev_list in self.pkey_ev_map.values(): # rewrite list of I/U/D events to # optional DELETE and optional INSERT/COPY command exists_before = -1 exists_after = 1 for ev in ev_list: if ev.op == "I": if exists_before < 0: exists_before = 0 exists_after = 1 elif ev.op == 
"U": if exists_before < 0: exists_before = 1 #exists_after = 1 # this shouldnt be needed elif ev.op == "D": if exists_before < 0: exists_before = 1 exists_after = 0 else: raise Exception('unknown event type: %s' % ev.op) # skip short-lived rows if exists_before == 0 and exists_after == 0: continue # take last event ev = ev_list[-1] # generate needed commands if exists_before and exists_after: upd_list.append(ev.data) elif exists_before: del_list.append(ev.data) elif exists_after: ins_list.append(ev.data) return ins_list, upd_list, del_list def flush(self, curs): ins_list, upd_list, del_list = self.prepare_data() # reorder cols col_list = self.pkey_list[:] for k in self.col_list: if k not in self.pkey_list: col_list.append(k) real_update_count = len(upd_list) #self.log.debug("process_one_table: %s (I/U/D = %d/%d/%d)", # tbl, len(ins_list), len(upd_list), len(del_list)) # hack to unbroke stuff if LOAD_METHOD == METH_MERGED: upd_list += ins_list ins_list = [] # fetch distribution fields if self.dist_fields is None: self.dist_fields = self.find_dist_fields(curs) key_fields = self.pkey_list[:] for fld in self.dist_fields: if fld not in key_fields: key_fields.append(fld) #self.log.debug("PKey fields: %s Extra fields: %s", # ",".join(cache.pkey_list), ",".join(extra_fields)) # create temp table temp = self.create_temp_table(curs) tbl = self.table_name # where expr must have pkey and dist fields klist = [] for pk in key_fields: exp = "%s.%s = %s.%s" % (quote_fqident(tbl), quote_ident(pk), quote_fqident(temp), quote_ident(pk)) klist.append(exp) whe_expr = " and ".join(klist) # create del sql del_sql = "delete from only %s using %s where %s" % ( quote_fqident(tbl), quote_fqident(temp), whe_expr) # create update sql slist = [] for col in col_list: if col not in key_fields: exp = "%s = %s.%s" % (quote_ident(col), quote_fqident(temp), quote_ident(col)) slist.append(exp) upd_sql = "update only %s set %s from %s where %s" % ( quote_fqident(tbl), ", ".join(slist), 
quote_fqident(temp), whe_expr) # insert sql colstr = ",".join([quote_ident(c) for c in col_list]) ins_sql = "insert into %s (%s) select %s from %s" % ( quote_fqident(tbl), colstr, colstr, quote_fqident(temp)) temp_used = False # process deleted rows if len(del_list) > 0: #self.log.info("Deleting %d rows from %s", len(del_list), tbl) # delete old rows q = "truncate %s" % quote_fqident(temp) self.log.debug(q) curs.execute(q) # copy rows self.log.debug("COPY %d rows into %s", len(del_list), temp) skytools.magic_insert(curs, temp, del_list, col_list) # delete rows self.log.debug(del_sql) curs.execute(del_sql) self.log.debug("%s - %d", curs.statusmessage, curs.rowcount) if len(del_list) != curs.rowcount: self.log.warning("Delete mismatch: expected=%d deleted=%d", len(del_list), curs.rowcount) temp_used = True # process updated rows if len(upd_list) > 0: #self.log.info("Updating %d rows in %s", len(upd_list), tbl) # delete old rows q = "truncate %s" % quote_fqident(temp) self.log.debug(q) curs.execute(q) # copy rows self.log.debug("COPY %d rows into %s", len(upd_list), temp) skytools.magic_insert(curs, temp, upd_list, col_list) temp_used = True if LOAD_METHOD == METH_CORRECT: # update main table self.log.debug(upd_sql) curs.execute(upd_sql) self.log.debug("%s - %d", curs.statusmessage, curs.rowcount) # check count if len(upd_list) != curs.rowcount: self.log.warning("Update mismatch: expected=%d updated=%d", len(upd_list), curs.rowcount) else: # delete from main table self.log.debug(del_sql) curs.execute(del_sql) self.log.debug(curs.statusmessage) # check count if real_update_count != curs.rowcount: self.log.warning("Update mismatch: expected=%d deleted=%d", real_update_count, curs.rowcount) # insert into main table if AVOID_BIZGRES_BUG: # copy again, into main table self.log.debug("COPY %d rows into %s", len(upd_list), tbl) skytools.magic_insert(curs, tbl, upd_list, col_list) else: # better way, but does not work due bizgres bug self.log.debug(ins_sql) 
curs.execute(ins_sql) self.log.debug(curs.statusmessage) # process new rows if len(ins_list) > 0: self.log.info("Inserting %d rows into %s", len(ins_list), tbl) skytools.magic_insert(curs, tbl, ins_list, col_list) # delete remaining rows if temp_used: if USE_LONGLIVED_TEMP_TABLES: q = "truncate %s" % quote_fqident(temp) else: # fscking problems with long-lived temp tables q = "drop table %s" % quote_fqident(temp) self.log.debug(q) curs.execute(q) self.reset() def create_temp_table(self, curs): # create temp table for loading tempname = self.table_name.replace('.', '_') + "_loadertmp" # check if exists if USE_LONGLIVED_TEMP_TABLES: if skytools.exists_temp_table(curs, tempname): self.log.debug("Using existing temp table %s", tempname) return tempname # bizgres crashes on delete rows arg = "on commit delete rows" arg = "on commit preserve rows" # create temp table for loading q = "create temp table %s (like %s) %s" % ( quote_fqident(tempname), quote_fqident(self.table_name), arg) self.log.debug("Creating temp table: %s", q) curs.execute(q) return tempname def find_dist_fields(self, curs): if not skytools.exists_table(curs, "pg_catalog.mpp_distribution_policy"): return [] schema, name = skytools.fq_name_parts(self.table_name) q = "select a.attname"\ " from pg_class t, pg_namespace n, pg_attribute a,"\ " mpp_distribution_policy p"\ " where n.oid = t.relnamespace"\ " and p.localoid = t.oid"\ " and a.attrelid = t.oid"\ " and a.attnum = any(p.attrnums)"\ " and n.nspname = %s and t.relname = %s" curs.execute(q, [schema, name]) res = [] for row in curs.fetchall(): res.append(row[0]) return res class TableHandler: """Basic partitioned loader. Splits events into partitions, if requested. Then applies them without further processing. 
""" def __init__(self, rowhandler, table_name, table_mode, cf, log): self.part_map = {} self.rowhandler = rowhandler self.table_name = table_name self.quoted_name = quote_fqident(table_name) self.log = log if table_mode == 'direct': self.split = False elif table_mode == 'split': self.split = True smode = cf.get('split_mode', 'by-batch-time') sfield = None if smode.find(':') > 0: smode, sfield = smode.split(':', 1) self.split_field = sfield self.split_part = cf.get('split_part', '%(table_name)s_%(year)s_%(month)s_%(day)s') self.split_part_template = cf.get('split_part_template', '') if smode == 'by-batch-time': self.split_format = self.split_date_from_batch elif smode == 'by-event-time': self.split_format = self.split_date_from_event elif smode == 'by-date-field': self.split_format = self.split_date_from_field else: raise UsageError('Bad value for split_mode: '+smode) self.log.debug("%s: split_mode=%s, split_field=%s, split_part=%s", self.table_name, smode, self.split_field, self.split_part) elif table_mode == 'ignore': pass else: raise UsageError('Bad value for table_mode: '+table_mode) def split_date_from_batch(self, ev, data, batch_info): d = batch_info['batch_end'] vals = { 'table_name': self.table_name, 'year': "%04d" % d.year, 'month': "%02d" % d.month, 'day': "%02d" % d.day, 'hour': "%02d" % d.hour, } dst = self.split_part % vals return dst def split_date_from_event(self, ev, data, batch_info): d = ev.ev_date vals = { 'table_name': self.table_name, 'year': "%04d" % d.year, 'month': "%02d" % d.month, 'day': "%02d" % d.day, 'hour': "%02d" % d.hour, } dst = self.split_part % vals return dst def split_date_from_field(self, ev, data, batch_info): val = data[self.split_field] date, time = val.split(' ', 1) y, m, d = date.split('-') h, rest = time.split(':', 1) vals = { 'table_name': self.table_name, 'year': y, 'month': m, 'day': d, 'hour': h, } dst = self.split_part % vals return dst def add(self, curs, ev, batch_info): data = skytools.db_urldecode(ev.data) op, 
pkeys = ev.type.split(':', 1) pkey_list = pkeys.split(',') if self.split: dst = self.split_format(ev, data, batch_info) if dst not in self.part_map: self.check_part(curs, dst, pkey_list) else: dst = self.table_name if dst not in self.part_map: self.part_map[dst] = self.rowhandler(dst, self.table_name, self.log) p = self.part_map[dst] p.add_row(op, data, pkey_list) def flush(self, curs): for part in self.part_map.values(): part.flush(curs) def check_part(self, curs, dst, pkey_list): if skytools.exists_table(curs, dst): return if not self.split_part_template: raise UsageError('Partition %s does not exist and split_part_template not specified' % dst) vals = { 'dest': quote_fqident(dst), 'part': quote_fqident(dst), 'parent': quote_fqident(self.table_name), 'pkey': ",".join(pkey_list), # quoting? } sql = self.split_part_template % vals curs.execute(sql) class IgnoreTable(TableHandler): """Do-nothing.""" def add(self, curs, ev, batch_info): pass class QueueLoader(CascadedWorker): """Loader script.""" table_state = {} def reset(self): """Drop our caches on error.""" self.table_state = {} CascadedWorker.reset(self) def init_state(self, tbl): cf = self.cf if tbl in cf.cf.sections(): cf = cf.clone(tbl) table_mode = cf.get('table_mode', 'ignore') row_mode = cf.get('row_mode', 'plain') if table_mode == 'ignore': tblhandler = IgnoreTable else: tblhandler = TableHandler if row_mode == 'plain': rowhandler = BasicLoader elif row_mode == 'keep_latest': rowhandler = KeepLatestLoader elif row_mode == 'keep_all': rowhandler = KeepAllLoader elif row_mode == 'bulk': rowhandler = BulkLoader else: raise UsageError('Bad row_mode: '+row_mode) self.table_state[tbl] = tblhandler(rowhandler, tbl, table_mode, cf, self.log) def process_remote_event(self, src_curs, dst_curs, ev): t = ev.type[:2] if t not in ('I:', 'U:', 'D:'): CascadedWorker.process_remote_event(self, src_curs, dst_curs, ev) return tbl = ev.extra1 if tbl not in self.table_state: self.init_state(tbl) st = self.table_state[tbl] 
st.add(dst_curs, ev, self._batch_info) def finish_remote_batch(self, src_db, dst_db, tick_id): curs = dst_db.cursor() for st in self.table_state.values(): st.flush(curs) CascadedWorker.finish_remote_batch(self, src_db, dst_db, tick_id) if __name__ == '__main__': script = QueueLoader('queue_loader', 'db', sys.argv[1:]) script.start() skytools-3.2.6/scripts/simple_consumer.py0000755000000000000000000000354312426435645015567 0ustar #!/usr/bin/env python """Consumer that simply calls SQL query for each event. Config:: # source database src_db = # destination database dst_db = # query to call dst_query = select * from somefunc(%%(pgq.ev_data)s); ## Deprecated, use table_filter ## # filter for events (SQL fragment) consumer_filter = ev_extra1 = 'public.mytable1' """ import sys import pkgloader pkgloader.require('skytools', '3.0') import pgq import skytools class SimpleConsumer(pgq.Consumer): __doc__ = __doc__ def reload(self): super(SimpleConsumer, self).reload() self.dst_query = self.cf.get("dst_query") if self.cf.get("consumer_filter", ""): self.consumer_filter = self.cf.get("consumer_filter", "") def process_event(self, db, ev): curs = self.get_database('dst_db', autocommit = 1).cursor() if ev.ev_type[:2] not in ('I:', 'U:', 'D:'): return if ev.ev_data is None: payload = {} else: payload = skytools.db_urldecode(ev.ev_data) payload['pgq.tick_id'] = self.batch_info['cur_tick_id'] payload['pgq.ev_id'] = ev.ev_id payload['pgq.ev_time'] = ev.ev_time payload['pgq.ev_type'] = ev.ev_type payload['pgq.ev_data'] = ev.ev_data payload['pgq.ev_extra1'] = ev.ev_extra1 payload['pgq.ev_extra2'] = ev.ev_extra2 payload['pgq.ev_extra3'] = ev.ev_extra3 payload['pgq.ev_extra4'] = ev.ev_extra4 self.log.debug(self.dst_query, payload) curs.execute(self.dst_query, payload) if curs.statusmessage[:6] == 'SELECT': res = curs.fetchall() self.log.debug(res) else: self.log.debug(curs.statusmessage) if __name__ == '__main__': script = SimpleConsumer("simple_consumer3", "src_db", sys.argv[1:]) 
script.start() skytools-3.2.6/scripts/queue_mover.py0000755000000000000000000000215712426435645014717 0ustar #! /usr/bin/env python """This script simply mover events from one queue to another. Config parameters:: ## Parameters for queue_mover src_db = dbname=sourcedb dst_db = dbname=targetdb dst_queue_name = dest_queue """ import sys, os import pkgloader pkgloader.require('skytools', '3.0') import pgq class QueueMover(pgq.SerialConsumer): __doc__ = __doc__ def __init__(self, args): pgq.SerialConsumer.__init__(self, "queue_mover3", "src_db", "dst_db", args) self.dst_queue_name = self.cf.get("dst_queue_name") def process_remote_batch(self, db, batch_id, ev_list, dst_db): # load data rows = [] for ev in ev_list: data = [ev.type, ev.data, ev.extra1, ev.extra2, ev.extra3, ev.extra4, ev.time] rows.append(data) fields = ['type', 'data', 'extra1', 'extra2', 'extra3', 'extra4', 'time'] # insert data curs = dst_db.cursor() pgq.bulk_insert_events(curs, rows, fields, self.dst_queue_name) if __name__ == '__main__': script = QueueMover(sys.argv[1:]) script.start() skytools-3.2.6/scripts/grantfu.py0000755000000000000000000002600212426435645014024 0ustar #! /usr/bin/env python # GrantFu - GRANT/REVOKE generator for Postgres # # Copyright (c) 2005 Marko Kreen # # Permission to use, copy, modify, and/or distribute this software for any # purpose with or without fee is hereby granted, provided that the above # copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
"""Generator for PostgreSQL permissions. Loads config where roles, objects and their mapping is described and generates grants based on them. ConfigParser docs: http://docs.python.org/lib/module-ConfigParser.html Example: -------------------------------------------------------------------- [DEFAULT] users = user1, user2 # users to handle groups = group1, group2 # groups to handle auto_seq = 0 # dont handle seqs (default) # '!' after a table negates this setting for a table seq_name = id # the name for serial field (default: id) seq_usage = 0 # should we grant "usage" or "select, update" # for automatically handled sequences # section names can be random, but if you want to see them # in same order as in config file, then order them alphabetically [1.section] on.tables = testtbl, testtbl_id_seq, # here we handle seq by hand table_with_seq! # handle seq automatically # (table_with_seq_id_seq) user1 = select group1 = select, insert, update # instead of 'tables', you may use 'functions', 'languages', # 'schemas', 'tablespaces' --------------------------------------------------------------------- """ import sys, os, getopt from ConfigParser import SafeConfigParser __version__ = "1.0" R_NEW = 0x01 R_DEFS = 0x02 G_DEFS = 0x04 R_ONLY = 0x80 def usage(err): sys.stderr.write("usage: %s [-r|-R] CONF_FILE\n" % sys.argv[0]) sys.stderr.write(" -r Generate also REVOKE commands\n") sys.stderr.write(" -R Generate only REVOKE commands\n") sys.stderr.write(" -d Also REVOKE default perms\n") sys.stderr.write(" -D Only REVOKE default perms\n") sys.stderr.write(" -o Generate default GRANTS\n") sys.stderr.write(" -v Print program version\n") sys.stderr.write(" -t Put everything in one big transaction\n") sys.exit(err) class PConf(SafeConfigParser): "List support for ConfigParser" def __init__(self, defaults = None): SafeConfigParser.__init__(self, defaults) def get_list(self, sect, key): str = self.get(sect, key).strip() res = [] if not str: return res for val in str.split(","): 
res.append(val.strip()) return res class GrantFu: def __init__(self, cf, revoke): self.cf = cf self.revoke = revoke # avoid putting grantfu vars into defaults, thus into every section self.group_list = [] self.user_list = [] self.auto_seq = 0 self.seq_name = "id" self.seq_usage = 0 if self.cf.has_option('GrantFu', 'groups'): self.group_list = self.cf.get_list('GrantFu', 'groups') if self.cf.has_option('GrantFu', 'users'): self.user_list += self.cf.get_list('GrantFu', 'users') if self.cf.has_option('GrantFu', 'roles'): self.user_list += self.cf.get_list('GrantFu', 'roles') if self.cf.has_option('GrantFu', 'auto_seq'): self.auto_seq = self.cf.getint('GrantFu', 'auto_seq') if self.cf.has_option('GrantFu', 'seq_name'): self.seq_name = self.cf.get('GrantFu', 'seq_name') if self.cf.has_option('GrantFu', 'seq_usage'): self.seq_usage = self.cf.getint('GrantFu', 'seq_usage') # make string of all subjects tmp = [] for g in self.group_list: tmp.append("group " + g) for u in self.user_list: tmp.append(u) self.all_subjs = ", ".join(tmp) # per-section vars self.sect = None self.seq_list = [] self.seq_allowed = [] def process(self): if len(self.user_list) == 0 and len(self.group_list) == 0: return sect_list = self.cf.sections() sect_list.sort() for self.sect in sect_list: if self.sect == "GrantFu": continue print "\n-- %s --" % self.sect self.handle_tables() self.handle_other('on.databases', 'DATABASE') self.handle_other('on.functions', 'FUNCTION') self.handle_other('on.languages', 'LANGUAGE') self.handle_other('on.schemas', 'SCHEMA') self.handle_other('on.tablespaces', 'TABLESPACE') self.handle_other('on.sequences', 'SEQUENCE') self.handle_other('on.types', 'TYPE') self.handle_other('on.domains', 'DOMAIN') def handle_other(self, listname, obj_type): """Handle grants for all objects except tables.""" if not self.sect_hasvar(listname): return # don't parse list, as in case of functions it may be complicated obj_str = obj_type + " " + self.sect_var(listname) if self.revoke & R_NEW: 
self.gen_revoke(obj_str) if self.revoke & R_DEFS: self.gen_revoke_defs(obj_str, obj_type) if not self.revoke & R_ONLY: self.gen_one_type(obj_str) if self.revoke & G_DEFS: self.gen_defs(obj_str, obj_type) def handle_tables(self): """Handle grants for tables and sequences. The tricky part here is the automatic handling of sequences.""" if not self.sect_hasvar('on.tables'): return cleaned_list = [] table_list = self.sect_list('on.tables') for table in table_list: if table[-1] == '!': table = table[:-1] if not self.auto_seq: self.seq_list.append("%s_%s_seq" % (table, self.seq_name)) else: if self.auto_seq: self.seq_list.append("%s_%s_seq" % (table, self.seq_name)) cleaned_list.append(table) obj_str = "TABLE " + ", ".join(cleaned_list) if self.revoke & R_NEW: self.gen_revoke(obj_str) if self.revoke & R_DEFS: self.gen_revoke_defs(obj_str, "TABLE") if not self.revoke & R_ONLY: self.gen_one_type(obj_str) if self.revoke & G_DEFS: self.gen_defs(obj_str, "TABLE") # cleanup self.seq_list = [] self.seq_allowed = [] def gen_revoke(self, obj_str): "Generate revoke for one section / subject type (user or group)" if len(self.seq_list) > 0: obj_str += ", " + ", ".join(self.seq_list) obj_str = obj_str.strip().replace('\n', '\n ') print "REVOKE ALL ON %s\n FROM %s CASCADE;" % (obj_str, self.all_subjs) def gen_revoke_defs(self, obj_str, obj_type): "Generate revoke defaults for one section" # process only things that have default grants to public if obj_type not in ('FUNCTION', 'DATABASE', 'LANGUAGE', 'TYPE', 'DOMAIN'): return defrole = 'public' # if the sections contains grants to 'public', dont drop if self.sect_hasvar(defrole): return obj_str = obj_str.strip().replace('\n', '\n ') print "REVOKE ALL ON %s\n FROM %s CASCADE;" % (obj_str, defrole) def gen_defs(self, obj_str, obj_type): "Generate defaults grants for one section" if obj_type == "FUNCTION": defgrants = "execute" elif obj_type == "DATABASE": defgrants = "connect, temp" elif obj_type in ("LANGUAGE", "TYPE", "DOMAIN"): 
defgrants = "usage" else: return defrole = 'public' obj_str = obj_str.strip().replace('\n', '\n ') print "GRANT %s ON %s\n TO %s;" % (defgrants, obj_str, defrole) def gen_one_subj(self, subj, fqsubj, obj_str): if not self.sect_hasvar(subj): return obj_str = obj_str.strip().replace('\n', '\n ') perm = self.sect_var(subj).strip() if perm: print "GRANT %s ON %s\n TO %s;" % (perm, obj_str, fqsubj) # check for seq perms if len(self.seq_list) > 0: loperm = perm.lower() if loperm.find("insert") >= 0 or loperm.find("all") >= 0: self.seq_allowed.append(fqsubj) def gen_one_type(self, obj_str): "Generate GRANT for one section / one object type in section" for u in self.user_list: self.gen_one_subj(u, u, obj_str) for g in self.group_list: self.gen_one_subj(g, "group " + g, obj_str) # if there was any seq perms, generate grants if len(self.seq_allowed) > 0: seq_str = ", ".join(self.seq_list) subj_str = ", ".join(self.seq_allowed) if self.seq_usage: cmd = "GRANT usage ON SEQUENCE %s\n TO %s;" else: cmd = "GRANT select, update ON %s\n TO %s;" print cmd % (seq_str, subj_str) def sect_var(self, name): return self.cf.get(self.sect, name).strip() def sect_list(self, name): return self.cf.get_list(self.sect, name) def sect_hasvar(self, name): return self.cf.has_option(self.sect, name) def main(): revoke = 0 tx = False try: opts, args = getopt.getopt(sys.argv[1:], "vhrRdDot") except getopt.error, det: print "getopt error:", det usage(1) for o, v in opts: if o == "-h": usage(0) elif o == "-r": revoke |= R_NEW elif o == "-R": revoke |= R_NEW | R_ONLY elif o == "-d": revoke |= R_DEFS elif o == "-D": revoke |= R_DEFS | R_ONLY elif o == "-o": revoke |= G_DEFS elif o == "-t": tx = True elif o == "-v": print "GrantFu version", __version__ sys.exit(0) if len(args) != 1: usage(1) # load config cf = PConf() cf.read(args[0]) if not cf.has_section("GrantFu"): print "Incorrect config file, GrantFu sction missing" sys.exit(1) if tx: print "begin;\n" # revokes and default grants if revoke & (R_NEW | 
R_DEFS): g = GrantFu(cf, revoke | R_ONLY) g.process() revoke = revoke & R_ONLY # grants if revoke & R_ONLY == 0: g = GrantFu(cf, revoke & G_DEFS) g.process() if tx: print "\ncommit;\n" if __name__ == '__main__': main() skytools-3.2.6/upgrade/0000755000000000000000000000000012426435645011741 5ustar skytools-3.2.6/upgrade/final/0000755000000000000000000000000012426435645013032 5ustar skytools-3.2.6/upgrade/final/v2.1.5_pgq_core.sql0000644000000000000000000004210412426435645016264 0ustar begin; alter table pgq.subscription add constraint subscription_ukey unique (sub_queue, sub_consumer); create index rq_retry_owner_idx on pgq.retry_queue (ev_owner, ev_id); create or replace function pgq.current_event_table(x_queue_name text) returns text as $$ -- ---------------------------------------------------------------------- -- Function: pgq.current_event_table(1) -- -- Return active event table for particular queue. -- Event can be added to it without going via functions, -- e.g. by COPY. -- -- Note: -- The result is valid only during current transaction. -- -- Permissions: -- Actual insertion requires superuser access. -- -- Parameters: -- x_queue_name - Queue name. -- ---------------------------------------------------------------------- declare res text; begin select queue_data_pfx || '_' || queue_cur_table into res from pgq.queue where queue_name = x_queue_name; if not found then raise exception 'Event queue not found'; end if; return res; end; $$ language plpgsql; -- no perms needed create or replace function pgq.event_failed( x_batch_id bigint, x_event_id bigint, x_reason text) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.event_failed(3) -- -- Copies the event to failed queue so it can be looked at later. -- -- Parameters: -- x_batch_id - ID of active batch. -- x_event_id - Event id -- x_reason - Text to associate with event. -- -- Returns: -- 0 if event was already in queue, 1 otherwise. 
-- ---------------------------------------------------------------------- begin insert into pgq.failed_queue (ev_failed_reason, ev_failed_time, ev_id, ev_time, ev_txid, ev_owner, ev_retry, ev_type, ev_data, ev_extra1, ev_extra2, ev_extra3, ev_extra4) select x_reason, now(), ev_id, ev_time, NULL, sub_id, coalesce(ev_retry, 0), ev_type, ev_data, ev_extra1, ev_extra2, ev_extra3, ev_extra4 from pgq.get_batch_events(x_batch_id), pgq.subscription where sub_batch = x_batch_id and ev_id = x_event_id; if not found then raise exception 'event not found'; end if; return 1; -- dont worry if the event is already in queue exception when unique_violation then return 0; end; $$ language plpgsql security definer; create or replace function pgq.event_retry( x_batch_id bigint, x_event_id bigint, x_retry_time timestamptz) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.event_retry(3) -- -- Put the event into retry queue, to be processed again later. -- -- Parameters: -- x_batch_id - ID of active batch. 
-- x_event_id - event id -- x_retry_time - Time when the event should be put back into queue -- -- Returns: -- nothing -- ---------------------------------------------------------------------- begin insert into pgq.retry_queue (ev_retry_after, ev_id, ev_time, ev_txid, ev_owner, ev_retry, ev_type, ev_data, ev_extra1, ev_extra2, ev_extra3, ev_extra4) select x_retry_time, ev_id, ev_time, NULL, sub_id, coalesce(ev_retry, 0) + 1, ev_type, ev_data, ev_extra1, ev_extra2, ev_extra3, ev_extra4 from pgq.get_batch_events(x_batch_id), pgq.subscription where sub_batch = x_batch_id and ev_id = x_event_id; if not found then raise exception 'event not found'; end if; return 1; -- dont worry if the event is already in queue exception when unique_violation then return 0; end; $$ language plpgsql security definer; create or replace function pgq.event_retry( x_batch_id bigint, x_event_id bigint, x_retry_seconds integer) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.event_retry(3) -- -- Put the event into retry queue, to be processed later again. -- -- Parameters: -- x_batch_id - ID of active batch. -- x_event_id - event id -- x_retry_seconds - Time when the event should be put back into queue -- -- Returns: -- nothing -- ---------------------------------------------------------------------- declare new_retry timestamptz; begin new_retry := current_timestamp + ((x_retry_seconds || ' seconds')::interval); return pgq.event_retry(x_batch_id, x_event_id, new_retry); end; $$ language plpgsql security definer; create or replace function pgq.force_tick(i_queue_name text) returns bigint as $$ -- ---------------------------------------------------------------------- -- Function: pgq.force_tick(2) -- -- Simulate lots of events happening to force ticker to tick. -- -- Should be called in loop, with some delay until last tick -- changes or too much time is passed. 
-- -- Such function is needed because paraller calls of pgq.ticker() are -- dangerous, and cannot be protected with locks as snapshot -- is taken before locking. -- -- Parameters: -- i_queue_name - Name of the queue -- -- Returns: -- Currently last tick id. -- ---------------------------------------------------------------------- declare q record; t record; begin -- bump seq and get queue id select queue_id, setval(queue_event_seq, nextval(queue_event_seq) + queue_ticker_max_count * 2) as tmp into q from pgq.queue where queue_name = i_queue_name and not queue_external_ticker; if not found then raise exception 'queue not found or ticks not allowed'; end if; -- return last tick id select tick_id into t from pgq.tick where tick_queue = q.queue_id order by tick_queue desc, tick_id desc limit 1; return t.tick_id; end; $$ language plpgsql security definer; create or replace function pgq.grant_perms(x_queue_name text) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.grant_perms(1) -- -- Make event tables readable by public. -- -- Parameters: -- x_queue_name - Name of the queue. -- -- Returns: -- nothing -- ---------------------------------------------------------------------- declare q record; i integer; tbl_perms text; seq_perms text; begin select * from pgq.queue into q where queue_name = x_queue_name; if not found then raise exception 'Queue not found'; end if; if true then -- safe, all access must go via functions seq_perms := 'select'; tbl_perms := 'select'; else -- allow ordinery users to directly insert -- to event tables. dangerous. 
seq_perms := 'select, update'; tbl_perms := 'select, insert'; end if; -- tick seq, normal users don't need to modify it execute 'grant ' || seq_perms || ' on ' || q.queue_tick_seq || ' to public'; -- event seq execute 'grant ' || seq_perms || ' on ' || q.queue_event_seq || ' to public'; -- parent table for events execute 'grant select on ' || q.queue_data_pfx || ' to public'; -- real event tables for i in 0 .. q.queue_ntables - 1 loop execute 'grant ' || tbl_perms || ' on ' || q.queue_data_pfx || '_' || i || ' to public'; end loop; return 1; end; $$ language plpgsql security definer; create or replace function pgq.insert_event(queue_name text, ev_type text, ev_data text) returns bigint as $$ -- ---------------------------------------------------------------------- -- Function: pgq.insert_event(3) -- -- Insert a event into queue. -- -- Parameters: -- queue_name - Name of the queue -- ev_type - User-specified type for the event -- ev_data - User data for the event -- -- Returns: -- Event ID -- ---------------------------------------------------------------------- begin return pgq.insert_event(queue_name, ev_type, ev_data, null, null, null, null); end; $$ language plpgsql security definer; create or replace function pgq.insert_event( queue_name text, ev_type text, ev_data text, ev_extra1 text, ev_extra2 text, ev_extra3 text, ev_extra4 text) returns bigint as $$ -- ---------------------------------------------------------------------- -- Function: pgq.insert_event(7) -- -- Insert a event into queue with all the extra fields. 
-- -- Parameters: -- queue_name - Name of the queue -- ev_type - User-specified type for the event -- ev_data - User data for the event -- ev_extra1 - Extra data field for the event -- ev_extra2 - Extra data field for the event -- ev_extra3 - Extra data field for the event -- ev_extra4 - Extra data field for the event -- -- Returns: -- Event ID -- ---------------------------------------------------------------------- begin return pgq.insert_event_raw(queue_name, null, now(), null, null, ev_type, ev_data, ev_extra1, ev_extra2, ev_extra3, ev_extra4); end; $$ language plpgsql security definer; create or replace function pgq.maint_tables_to_vacuum() returns setof text as $$ -- ---------------------------------------------------------------------- -- Function: pgq.maint_tables_to_vacuum(0) -- -- Returns list of tablenames that need frequent vacuuming. -- -- The goal is to avoid hardcoding them into maintenance process. -- -- Returns: -- List of table names. -- ---------------------------------------------------------------------- declare row record; begin return next 'pgq.subscription'; return next 'pgq.consumer'; return next 'pgq.queue'; return next 'pgq.tick'; return next 'pgq.retry_queue'; -- include also txid, pgq_ext and londiste tables if they exist for row in select n.nspname as scm, t.relname as tbl from pg_class t, pg_namespace n where n.oid = t.relnamespace and n.nspname = 'txid' and t.relname = 'epoch' union all select n.nspname as scm, t.relname as tbl from pg_class t, pg_namespace n where n.oid = t.relnamespace and n.nspname = 'londiste' and t.relname = 'completed' union all select n.nspname as scm, t.relname as tbl from pg_class t, pg_namespace n where n.oid = t.relnamespace and n.nspname = 'pgq_ext' and t.relname in ('completed_tick', 'completed_batch', 'completed_event', 'partial_batch') loop return next row.scm || '.' 
|| row.tbl; end loop; return; end; $$ language plpgsql; create or replace function pgq.next_batch(x_queue_name text, x_consumer_name text) returns bigint as $$ -- ---------------------------------------------------------------------- -- Function: pgq.next_batch(2) -- -- Makes next block of events active. -- -- If it returns NULL, there is no events available in queue. -- Consumer should sleep a bith then. -- -- Parameters: -- x_queue_name - Name of the queue -- x_consumer_name - Name of the consumer -- -- Returns: -- Batch ID or NULL if there are no more events available. -- ---------------------------------------------------------------------- declare next_tick bigint; batch_id bigint; errmsg text; sub record; begin select sub_queue, sub_consumer, sub_id, sub_last_tick, sub_batch into sub from pgq.queue q, pgq.consumer c, pgq.subscription s where q.queue_name = x_queue_name and c.co_name = x_consumer_name and s.sub_queue = q.queue_id and s.sub_consumer = c.co_id; if not found then errmsg := 'Not subscriber to queue: ' || coalesce(x_queue_name, 'NULL') || '/' || coalesce(x_consumer_name, 'NULL'); raise exception '%', errmsg; end if; -- has already active batch if sub.sub_batch is not null then return sub.sub_batch; end if; -- find next tick select tick_id into next_tick from pgq.tick where tick_id > sub.sub_last_tick and tick_queue = sub.sub_queue order by tick_queue asc, tick_id asc limit 1; if not found then -- nothing to do return null; end if; -- get next batch batch_id := nextval('pgq.batch_id_seq'); update pgq.subscription set sub_batch = batch_id, sub_next_tick = next_tick, sub_active = now() where sub_queue = sub.sub_queue and sub_consumer = sub.sub_consumer; return batch_id; end; $$ language plpgsql security definer; create or replace function pgq.register_consumer( x_queue_name text, x_consumer_id text) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.register_consumer(2) -- -- Subscribe 
consumer on a queue. -- -- From this moment forward, consumer will see all events in the queue. -- -- Parameters: -- x_queue_name - Name of queue -- x_consumer_name - Name of consumer -- -- Returns: -- 0 - if already registered -- 1 - if new registration -- ---------------------------------------------------------------------- begin return pgq.register_consumer(x_queue_name, x_consumer_id, NULL); end; $$ language plpgsql security definer; create or replace function pgq.register_consumer( x_queue_name text, x_consumer_name text, x_tick_pos bigint) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.register_consumer(3) -- -- Extended registration, allows to specify tick_id. -- -- Note: -- For usage in special situations. -- -- Parameters: -- x_queue_name - Name of a queue -- x_consumer_name - Name of consumer -- x_tick_pos - Tick ID -- -- Returns: -- 0/1 whether consumer has already registered. -- ---------------------------------------------------------------------- declare tmp text; last_tick bigint; x_queue_id integer; x_consumer_id integer; queue integer; sub record; begin select queue_id into x_queue_id from pgq.queue where queue_name = x_queue_name; if not found then raise exception 'Event queue not created yet'; end if; -- get consumer and create if new select co_id into x_consumer_id from pgq.consumer where co_name = x_consumer_name; if not found then insert into pgq.consumer (co_name) values (x_consumer_name); x_consumer_id := currval('pgq.consumer_co_id_seq'); end if; -- if particular tick was requested, check if it exists if x_tick_pos is not null then perform 1 from pgq.tick where tick_queue = x_queue_id and tick_id = x_tick_pos; if not found then raise exception 'cannot reposition, tick not found: %', x_tick_pos; end if; end if; -- check if already registered select sub_last_tick, sub_batch into sub from pgq.subscription where sub_consumer = x_consumer_id and sub_queue = x_queue_id; if found 
then if x_tick_pos is not null then if sub.sub_batch is not null then raise exception 'reposition while active not allowed'; end if; -- update tick pos if requested update pgq.subscription set sub_last_tick = x_tick_pos where sub_consumer = x_consumer_id and sub_queue = x_queue_id; end if; -- already registered return 0; end if; -- new registration if x_tick_pos is null then -- start from current tick select tick_id into last_tick from pgq.tick where tick_queue = x_queue_id order by tick_queue desc, tick_id desc limit 1; if not found then raise exception 'No ticks for this queue. Please run ticker on database.'; end if; else last_tick := x_tick_pos; end if; -- register insert into pgq.subscription (sub_queue, sub_consumer, sub_last_tick) values (x_queue_id, x_consumer_id, last_tick); return 1; end; $$ language plpgsql security definer; create or replace function pgq.version() returns text as $$ -- ---------------------------------------------------------------------- -- Function: pgq.version(0) -- -- Returns verison string for pgq. ATM its SkyTools version -- that is only bumped when PGQ database code changes. 
-- ---------------------------------------------------------------------- begin return '2.1.5'; end; $$ language plpgsql; grant usage on schema pgq to public; grant select on table pgq.consumer to public; grant select on table pgq.queue to public; grant select on table pgq.tick to public; grant select on table pgq.queue to public; grant select on table pgq.subscription to public; grant select on table pgq.event_template to public; grant select on table pgq.retry_queue to public; grant select on table pgq.failed_queue to public; end; skytools-3.2.6/upgrade/final/v2.1.5_pgq_ext.sql0000644000000000000000000000157412426435645016142 0ustar begin; create or replace function pgq_ext.get_last_tick(a_consumer text) returns int8 as $$ declare res int8; begin select last_tick_id into res from pgq_ext.completed_tick where consumer_id = a_consumer; return res; end; $$ language plpgsql security definer; create or replace function pgq_ext.set_last_tick(a_consumer text, a_tick_id bigint) returns integer as $$ begin if a_tick_id is null then delete from pgq_ext.completed_tick where consumer_id = a_consumer; else update pgq_ext.completed_tick set last_tick_id = a_tick_id where consumer_id = a_consumer; if not found then insert into pgq_ext.completed_tick (consumer_id, last_tick_id) values (a_consumer, a_tick_id); end if; end if; return 1; end; $$ language plpgsql security definer; end; skytools-3.2.6/upgrade/final/v2.1.6_londiste.sql0000644000000000000000000000021212426435645016301 0ustar begin; create or replace function londiste.version() returns text as $$ begin return '2.1.6'; end; $$ language plpgsql; end; skytools-3.2.6/upgrade/final/pgq.upgrade_2.1_to_3.0.sql0000644000000000000000000023643212426435645017444 0ustar -- -- This script upgrades pgq 2.1.x (x > 8) to 3.0. -- Later pgq.upgrade.sql should be applied to get to latest 3.x version. 
-- begin; -- new fields to pgq.queue alter table pgq.queue add column queue_disable_insert boolean; alter table pgq.queue add column queue_ticker_paused boolean; alter table pgq.queue add column queue_per_tx_limit int4; update pgq.queue set queue_disable_insert=false, queue_ticker_paused=false; alter table pgq.queue alter column queue_disable_insert set not null; alter table pgq.queue alter column queue_disable_insert set default false; alter table pgq.queue alter column queue_ticker_paused set not null; alter table pgq.queue alter column queue_ticker_paused set default false; -- new field to pgq.tick alter table pgq.tick add column tick_event_seq int8; -- surgery on pgq.retry_queue alter table pgq.retry_queue add column ev_queue int4; update pgq.retry_queue set ev_queue = sub_queue from pgq.subscription where ev_owner = sub_id; alter table pgq.retry_queue alter column ev_queue set not null; drop index pgq.rq_retry_owner_idx; -- surgery on pgq.subscription alter table pgq.retry_queue drop constraint rq_owner_fkey; alter table pgq.failed_queue drop constraint fq_owner_fkey; alter table pgq.subscription drop constraint subscription_pkey; alter table pgq.subscription drop constraint subscription_ukey; alter table pgq.subscription add constraint subscription_pkey primary key (sub_queue, sub_consumer); alter table pgq.subscription add constraint subscription_batch_idx unique (sub_batch); alter table pgq.subscription alter column sub_last_tick drop not null; -- drop failed queue functionality. not mandatory, who wants can keep it. 
drop function pgq.failed_event_list(text, text);
drop function pgq.failed_event_list(text, text, integer, integer);
drop function pgq.failed_event_count(text, text);
drop function pgq.failed_event_delete(text, text, bigint);
drop function pgq.failed_event_retry(text, text, bigint);
drop function pgq.event_failed(bigint, bigint, text);
drop table pgq.failed_queue;

-- drop obsolete functions
drop function pgq.ticker(text, bigint);
drop function pgq.register_consumer(text, text, int8);

-- drop types and related functions
-- (types cannot be replaced in place, so dependent functions go first)
drop function pgq.get_batch_events(bigint);
drop function pgq.get_batch_info(bigint);
drop function pgq.get_consumer_info();
drop function pgq.get_consumer_info(text);
drop function pgq.get_consumer_info(text, text);
drop function pgq.get_queue_info();
drop function pgq.get_queue_info(text);
drop type pgq.ret_batch_event;
drop type pgq.ret_batch_info;
drop type pgq.ret_consumer_info;
drop type pgq.ret_queue_info;

-- update all functions

-- Section: Internal Functions

-- install & launch schema upgrade

create or replace function pgq.upgrade_schema()
returns int4 as $$
-- updates table structure if necessary
-- Returns: number of structural changes applied (0 if already current).
declare
    cnt int4 = 0;
begin
    -- pgq.subscription.sub_last_tick: NOT NULL -> NULL
    perform 1 from information_schema.columns
      where table_schema = 'pgq'
        and table_name = 'subscription'
        and column_name = 'sub_last_tick'
        and is_nullable = 'NO';
    if found then
        alter table pgq.subscription
            alter column sub_last_tick
            drop not null;
        cnt := cnt + 1;
    end if;
    return cnt;
end;
$$ language plpgsql;

select pgq.upgrade_schema();

-- Group: Low-level event handling

create or replace function pgq.batch_event_sql(x_batch_id bigint)
returns text as $$
-- ----------------------------------------------------------------------
-- Function: pgq.batch_event_sql(1)
--      Creates SELECT statement that fetches events for this batch.
--
-- Parameters:
--      x_batch_id      - ID of a active batch.
--
-- Returns:
--      SQL statement.
-- ----------------------------------------------------------------------

-- ----------------------------------------------------------------------
-- Algorithm description:
--      Given 2 snapshots, sn1 and sn2 with sn1 having xmin1, xmax1
--      and sn2 having xmin2, xmax2 create expression that filters
--      right txid's from event table.
--
--      Simplest solution would be
--      > WHERE ev_txid >= xmin1 AND ev_txid <= xmax2
--      >   AND NOT txid_visible_in_snapshot(ev_txid, sn1)
--      >   AND txid_visible_in_snapshot(ev_txid, sn2)
--
--      The simple solution has a problem with long transactions (xmin1
--      very low).  All the batches that happen when the long tx is active
--      will need to scan all events in that range.  Here are 2
--      optimizations used:
--
--      1) Use [xmax1..xmax2] for range scan.  That limits the range to
--      txids that actually happened between two snapshots.  For txids
--      in the range [xmin1..xmax1] look which ones were actually
--      committed between snapshots and search for them using exact
--      values using IN (..) list.
--
--      2) As most TX are short, there could be lot of them that were
--      just below xmax1, but were committed before xmax2.  So look
--      if there are ID's near xmax1 and lower the range to include
--      them, thus decreasing size of IN (..) list.
-- ----------------------------------------------------------------------
declare
    rec             record;
    sql             text;
    tbl             text;
    arr             text;
    part            text;
    select_fields   text;
    retry_expr      text;
    batch           record;
begin
    -- load batch boundaries: previous tick (exclusive) and next tick (inclusive)
    select s.sub_last_tick, s.sub_next_tick, s.sub_id, s.sub_queue,
           txid_snapshot_xmax(last.tick_snapshot) as tx_start,
           txid_snapshot_xmax(cur.tick_snapshot) as tx_end,
           last.tick_snapshot as last_snapshot,
           cur.tick_snapshot as cur_snapshot
        into batch
        from pgq.subscription s, pgq.tick last, pgq.tick cur
        where s.sub_batch = x_batch_id
          and last.tick_queue = s.sub_queue
          and last.tick_id = s.sub_last_tick
          and cur.tick_queue = s.sub_queue
          and cur.tick_id = s.sub_next_tick;
    if not found then
        raise exception 'batch not found';
    end if;

    -- load older transactions
    arr := '';
    for rec in
        -- active tx-es in prev_snapshot that were committed in cur_snapshot
        select id1 from
            txid_snapshot_xip(batch.last_snapshot) id1
            left join txid_snapshot_xip(batch.cur_snapshot) id2
                   on (id1 = id2)
        where id2 is null
        order by 1 desc
    loop
        -- try to avoid big IN expression, so try to include nearby
        -- tx'es into range
        if batch.tx_start - 100 <= rec.id1 then
            batch.tx_start := rec.id1;
        else
            if arr = '' then
                arr := rec.id1::text;
            else
                arr := arr || ',' || rec.id1::text;
            end if;
        end if;
    end loop;

    -- must match pgq.event_template
    select_fields := 'select ev_id, ev_time, ev_txid, ev_retry, ev_type,'
                  || ' ev_data, ev_extra1, ev_extra2, ev_extra3, ev_extra4';
    -- filter: generic events plus retry events owned by this subscription
    retry_expr := ' and (ev_owner is null or ev_owner = '
               || batch.sub_id::text || ')';

    -- now generate query that goes over all potential tables
    sql := '';
    for rec in
        select xtbl from pgq.batch_event_tables(x_batch_id) xtbl
    loop
        tbl := rec.xtbl;
        -- this gets newer queries that definitely are not in prev_snapshot
        part := select_fields
            || ' from pgq.tick cur, pgq.tick last, ' || tbl || ' ev '
            || ' where cur.tick_id = ' || batch.sub_next_tick::text
            || ' and cur.tick_queue = ' || batch.sub_queue::text
            || ' and last.tick_id = ' || batch.sub_last_tick::text
            || ' and last.tick_queue = ' || batch.sub_queue::text
            || ' and ev.ev_txid >= ' || batch.tx_start::text
            || ' and ev.ev_txid <= ' || batch.tx_end::text
            || ' and txid_visible_in_snapshot(ev.ev_txid, cur.tick_snapshot)'
            || ' and not txid_visible_in_snapshot(ev.ev_txid, last.tick_snapshot)'
            || retry_expr;
        -- now include older tx-es, that were ongoing
        -- at the time of prev_snapshot
        if arr <> '' then
            part := part || ' union all '
                || select_fields || ' from ' || tbl || ' ev '
                || ' where ev.ev_txid in (' || arr || ')'
                || retry_expr;
        end if;
        if sql = '' then
            sql := part;
        else
            sql := sql || ' union all ' || part;
        end if;
    end loop;
    if sql = '' then
        raise exception 'could not construct sql for batch %', x_batch_id;
    end if;
    return sql || ' order by 1';
end;
$$ language plpgsql; -- no perms needed

create or replace function pgq.batch_event_tables(x_batch_id bigint)
returns setof text as $$
-- ----------------------------------------------------------------------
-- Function: pgq.batch_event_tables(1)
--
--     Returns set of table names where this batch events may reside.
--
-- Parameters:
--     x_batch_id    - ID of a active batch.
-- ----------------------------------------------------------------------
declare
    nr          integer;
    tbl         text;
    use_prev    integer;
    use_next    integer;
    batch       record;
begin
    select txid_snapshot_xmin(last.tick_snapshot) as tx_min, -- absolute minimum
           txid_snapshot_xmax(cur.tick_snapshot) as tx_max, -- absolute maximum
           q.queue_data_pfx, q.queue_ntables,
           q.queue_cur_table, q.queue_switch_step1, q.queue_switch_step2
        into batch
        from pgq.tick last, pgq.tick cur, pgq.subscription s, pgq.queue q
        where cur.tick_id = s.sub_next_tick
          and cur.tick_queue = s.sub_queue
          and last.tick_id = s.sub_last_tick
          and last.tick_queue = s.sub_queue
          and s.sub_batch = x_batch_id
          and q.queue_id = s.sub_queue;
    if not found then
        raise exception 'Cannot find data for batch %', x_batch_id;
    end if;

    -- if its definitely not in one or other, look into both
    if batch.tx_max < batch.queue_switch_step1 then
        use_prev := 1;
        use_next := 0;
    elsif batch.queue_switch_step2 is not null
      and (batch.tx_min > batch.queue_switch_step2)
    then
        use_prev := 0;
        use_next := 1;
    else
        use_prev := 1;
        use_next := 1;
    end if;

    -- previous rotation table (cur_table - 1, wrapping around)
    if use_prev then
        nr := batch.queue_cur_table - 1;
        if nr < 0 then
            nr := batch.queue_ntables - 1;
        end if;
        tbl := batch.queue_data_pfx || '_' || nr::text;
        return next tbl;
    end if;

    -- current rotation table
    if use_next then
        tbl := batch.queue_data_pfx || '_' || batch.queue_cur_table::text;
        return next tbl;
    end if;

    return;
end;
$$ language plpgsql; -- no perms needed

create or replace function pgq.event_retry_raw(
    x_queue text,
    x_consumer text,
    x_retry_after timestamptz,
    x_ev_id bigint,
    x_ev_time timestamptz,
    x_ev_retry integer,
    x_ev_type text,
    x_ev_data text,
    x_ev_extra1 text,
    x_ev_extra2 text,
    x_ev_extra3 text,
    x_ev_extra4 text)
returns bigint as $$
-- ----------------------------------------------------------------------
-- Function: pgq.event_retry_raw(12)
--
--      Allows full control over what goes to retry queue.
--
-- Parameters:
--      x_queue         - name of the queue
--      x_consumer      - name of the consumer
--      x_retry_after   - when the event should be processed again
--      x_ev_id         - event id
--      x_ev_time       - creation time
--      x_ev_retry      - retry count
--      x_ev_type       - user data
--      x_ev_data       - user data
--      x_ev_extra1     - user data
--      x_ev_extra2     - user data
--      x_ev_extra3     - user data
--      x_ev_extra4     - user data
--
-- Returns:
--      Event ID.
-- ----------------------------------------------------------------------
declare
    q record;
    id bigint;
begin
    -- resolve (queue, consumer) pair to a subscription
    select sub_id, queue_event_seq, sub_queue into q
      from pgq.consumer, pgq.queue, pgq.subscription
     where queue_name = x_queue
       and co_name = x_consumer
       and sub_consumer = co_id
       and sub_queue = queue_id;
    if not found then
        raise exception 'consumer not registered';
    end if;

    -- allocate event id from the queue's sequence if caller gave none
    id := x_ev_id;
    if id is null then
        id := nextval(q.queue_event_seq);
    end if;

    insert into pgq.retry_queue (ev_retry_after, ev_queue,
            ev_id, ev_time, ev_owner, ev_retry,
            ev_type, ev_data, ev_extra1, ev_extra2,
            ev_extra3, ev_extra4)
    values (x_retry_after, q.sub_queue,
            id, x_ev_time, q.sub_id, x_ev_retry,
            x_ev_type, x_ev_data, x_ev_extra1, x_ev_extra2,
            x_ev_extra3, x_ev_extra4);

    return id;
end;
$$ language plpgsql security definer;

create or replace function pgq.find_tick_helper(
    in i_queue_id int4,
    in i_prev_tick_id int8,
    in i_prev_tick_time timestamptz,
    in i_prev_tick_seq int8,
    in i_min_count int8,
    in i_min_interval interval,
    out next_tick_id int8,
    out next_tick_time timestamptz,
    out next_tick_seq int8)
as $$
-- ----------------------------------------------------------------------
-- Function: pgq.find_tick_helper(6)
--
--      Helper function for pgq.next_batch_custom() to do extended
--      tick search.
-- ----------------------------------------------------------------------
declare
    sure    boolean;
    can_set boolean;
    t       record;
    cnt     int8;
    ival    interval;
begin
    -- first, fetch last tick of the queue
    select tick_id, tick_time, tick_event_seq into t
        from pgq.tick
        where tick_queue = i_queue_id
          and tick_id > i_prev_tick_id
        order by tick_queue desc, tick_id desc
        limit 1;
    if not found then
        return;
    end if;

    -- check whether batch would end up within reasonable limits
    sure := true;
    can_set := false;
    if i_min_count is not null then
        cnt = t.tick_event_seq - i_prev_tick_seq;
        if cnt >= i_min_count then
            can_set := true;
        end if;
        if cnt > i_min_count * 2 then
            sure := false;
        end if;
    end if;
    if i_min_interval is not null then
        ival = t.tick_time - i_prev_tick_time;
        if ival >= i_min_interval then
            can_set := true;
        end if;
        if ival > i_min_interval * 2 then
            sure := false;
        end if;
    end if;

    -- if last tick too far away, do large scan for the earliest
    -- tick that satisfies the count/interval thresholds
    if not sure then
        select tick_id, tick_time, tick_event_seq into t
            from pgq.tick
            where tick_queue = i_queue_id
              and tick_id > i_prev_tick_id
              and ((i_min_count is not null
                    and (tick_event_seq - i_prev_tick_seq) >= i_min_count)
                   or
                   (i_min_interval is not null
                    and (tick_time - i_prev_tick_time) >= i_min_interval))
            order by tick_queue asc, tick_id asc
            limit 1;
        can_set := true;
    end if;

    if can_set then
        next_tick_id := t.tick_id;
        next_tick_time := t.tick_time;
        next_tick_seq := t.tick_event_seq;
    end if;
    return;
end;
$$ language plpgsql stable;

-- \i functions/pgq.insert_event_raw.sql

-- ----------------------------------------------------------------------
-- Function: pgq.insert_event_raw(11)
--
--      Actual event insertion.  Used also by retry queue maintenance.
--
-- Parameters:
--      queue_name      - Name of the queue
--      ev_id           - Event ID.  If NULL, will be taken from seq.
--      ev_time         - Event creation time.
--      ev_owner        - Subscription ID when retry event.  If NULL,
--                        the event is for everybody.
--      ev_retry        - Retry count.  NULL for first-time events.
--      ev_type         - user data
--      ev_data         - user data
--      ev_extra1       - user data
--      ev_extra2       - user data
--      ev_extra3       - user data
--      ev_extra4       - user data
--
-- Returns:
--      Event ID.
-- ----------------------------------------------------------------------
CREATE OR REPLACE FUNCTION pgq.insert_event_raw(
    queue_name text, ev_id bigint, ev_time timestamptz,
    ev_owner integer, ev_retry integer, ev_type text,
    ev_data text, ev_extra1 text, ev_extra2 text,
    ev_extra3 text, ev_extra4 text)
RETURNS int8 AS '$libdir/pgq_lowlevel', 'pgq_insert_event_raw' LANGUAGE C;

-- Group: Ticker

create or replace function pgq.ticker(i_queue_name text, i_tick_id bigint,
    i_orig_timestamp timestamptz, i_event_seq bigint)
returns bigint as $$
-- ----------------------------------------------------------------------
-- Function: pgq.ticker(4)
--
--     External ticker: Insert a tick with a particular tick_id
--     and timestamp.
--
-- Parameters:
--     i_queue_name       - Name of the queue
--     i_tick_id          - Id of new tick.
--     i_orig_timestamp   - Timestamp for the new tick.
--     i_event_seq        - Event sequence value at tick time.
--
-- Returns:
--     Tick id.
-- ----------------------------------------------------------------------
begin
    -- only queues with external ticker enabled and not paused accept this
    insert into pgq.tick (tick_queue, tick_id, tick_time, tick_event_seq)
    select queue_id, i_tick_id, i_orig_timestamp, i_event_seq
        from pgq.queue
        where queue_name = i_queue_name
          and queue_external_ticker
          and not queue_ticker_paused;
    if not found then
        raise exception 'queue not found or ticker disabled: %', i_queue_name;
    end if;

    -- make sure seqs stay current
    perform pgq.seq_setval(queue_tick_seq, i_tick_id),
            pgq.seq_setval(queue_event_seq, i_event_seq)
        from pgq.queue
        where queue_name = i_queue_name;

    return i_tick_id;
end;
$$ language plpgsql security definer; -- unsure about access

create or replace function pgq.ticker(i_queue_name text)
returns bigint as $$
-- ----------------------------------------------------------------------
-- Function: pgq.ticker(1)
--
--     Check if tick is needed for the queue and insert it.
--
--     For pgqadm usage.
--
-- Parameters:
--     i_queue_name     - Name of the queue
--
-- Returns:
--     Tick id or NULL if no tick was done.
-- ----------------------------------------------------------------------
declare
    res     bigint;
    q       record;
    state   record;
    last2   record;
begin
    select queue_id, queue_tick_seq, queue_external_ticker,
            queue_ticker_max_count, queue_ticker_max_lag,
            queue_ticker_idle_period, queue_event_seq,
            pgq.seq_getval(queue_event_seq) as event_seq,
            queue_ticker_paused
        into q
        from pgq.queue where queue_name = i_queue_name;
    if not found then
        raise exception 'no such queue';
    end if;

    if q.queue_external_ticker then
        raise exception 'This queue has external tick source.';
    end if;

    if q.queue_ticker_paused then
        raise exception 'Ticker has been paused for this queue';
    end if;

    -- load state from last tick
    select now() - tick_time as lag,
           q.event_seq - tick_event_seq as new_events,
           tick_id, tick_time, tick_event_seq,
           txid_snapshot_xmax(tick_snapshot) as sxmax,
           txid_snapshot_xmin(tick_snapshot) as sxmin
        into state
        from pgq.tick
        where tick_queue = q.queue_id
        order by tick_queue desc, tick_id desc
        limit 1;

    if found then
        -- sanity checks against a corrupted / rewound queue state
        if state.sxmin > txid_current() then
            raise exception 'Invalid PgQ state: old xmin=%, old xmax=%, cur txid=%',
                state.sxmin, state.sxmax, txid_current();
        end if;
        if state.new_events < 0 then
            raise warning 'Negative new_events?  old=% cur=%',
                state.tick_event_seq, q.event_seq;
        end if;
        if state.sxmax > txid_current() then
            raise warning 'Dubious PgQ state: old xmax=%, cur txid=%',
                state.sxmax, txid_current();
        end if;

        if state.new_events > 0 then
            -- there are new events, should we wait a bit?
            if state.new_events < q.queue_ticker_max_count
               and state.lag < q.queue_ticker_max_lag
            then
                return NULL;
            end if;
        else
            -- no new events, should we apply idle period?
            -- check previous event from the last one.
            select state.tick_time - tick_time as lag
                into last2
                from pgq.tick
                where tick_queue = q.queue_id
                  and tick_id < state.tick_id
                order by tick_queue desc, tick_id desc
                limit 1;
            if found then
                -- gradually decrease the tick frequency
                if (state.lag < q.queue_ticker_max_lag / 2)
                   or
                   (state.lag < last2.lag * 2
                    and state.lag < q.queue_ticker_idle_period)
                then
                    return NULL;
                end if;
            end if;
        end if;
    end if;

    insert into pgq.tick (tick_queue, tick_id, tick_event_seq)
        values (q.queue_id, nextval(q.queue_tick_seq), q.event_seq);

    return currval(q.queue_tick_seq);
end;
$$ language plpgsql security definer; -- unsure about access

create or replace function pgq.ticker() returns bigint as $$
-- ----------------------------------------------------------------------
-- Function: pgq.ticker(0)
--
--     Creates ticks for all unpaused queues which dont have
--     external ticker.
--
-- Returns:
--     Number of queues that were processed.
-- ----------------------------------------------------------------------
declare
    res bigint;
    q record;
begin
    res := 0;
    for q in
        select queue_name from pgq.queue
            where not queue_external_ticker
              and not queue_ticker_paused
            order by queue_name
    loop
        if pgq.ticker(q.queue_name) > 0 then
            res := res + 1;
        end if;
    end loop;
    return res;
end;
$$ language plpgsql security definer;

-- Group: Periodic maintenence

create or replace function pgq.maint_retry_events()
returns integer as $$
-- ----------------------------------------------------------------------
-- Function: pgq.maint_retry_events(0)
--
--      Moves retry events back to main queue.
--
--      It moves small amount at a time.  It should be called
--      until it returns 0.
--
-- Returns:
--      Number of events processed.
-- ----------------------------------------------------------------------
declare
    cnt    integer;
    rec    record;
begin
    cnt := 0;

    -- allow only single event mover at a time, without affecting inserts
    lock table pgq.retry_queue in share update exclusive mode;

    -- move at most 10 due events per call; caller loops until 0
    for rec in
        select queue_name,
               ev_id, ev_time, ev_owner, ev_retry, ev_type, ev_data,
               ev_extra1, ev_extra2, ev_extra3, ev_extra4
          from pgq.retry_queue, pgq.queue
         where ev_retry_after <= current_timestamp
           and queue_id = ev_queue
         order by ev_retry_after
         limit 10
    loop
        cnt := cnt + 1;
        perform pgq.insert_event_raw(rec.queue_name,
                rec.ev_id, rec.ev_time, rec.ev_owner, rec.ev_retry,
                rec.ev_type, rec.ev_data, rec.ev_extra1, rec.ev_extra2,
                rec.ev_extra3, rec.ev_extra4);
        delete from pgq.retry_queue
         where ev_owner = rec.ev_owner
           and ev_id = rec.ev_id;
    end loop;

    return cnt;
end;
$$ language plpgsql; -- need admin access

create or replace function pgq.maint_rotate_tables_step1(i_queue_name text)
returns integer as $$
-- ----------------------------------------------------------------------
-- Function: pgq.maint_rotate_tables_step1(1)
--
--      Rotate tables for one queue.
--
-- Parameters:
--      i_queue_name        - Name of the queue
--
-- Returns:
--      0
-- ----------------------------------------------------------------------
declare
    badcnt          integer;
    cf              record;
    nr              integer;
    tbl             text;
    lowest_tick_id  int8;
    lowest_xmin     int8;
begin
    -- check if needed and load record
    select * from pgq.queue into cf
        where queue_name = i_queue_name
          and queue_rotation_period is not null
          and queue_switch_step2 is not null
          and queue_switch_time + queue_rotation_period < current_timestamp
        for update;
    if not found then
        return 0;
    end if;

    -- find lowest tick for that queue
    select min(sub_last_tick) into lowest_tick_id
      from pgq.subscription
     where sub_queue = cf.queue_id;

    -- if some consumer exists
    if lowest_tick_id is not null then
        -- is the slowest one still on previous table?
        select txid_snapshot_xmin(tick_snapshot) into lowest_xmin
          from pgq.tick
         where tick_queue = cf.queue_id
           and tick_id = lowest_tick_id;
        if not found then
            raise exception 'queue % maint failure: tick % not found',
                            i_queue_name, lowest_tick_id;
        end if;
        if lowest_xmin <= cf.queue_switch_step2 then
            return 0; -- skip rotation then
        end if;
    end if;

    -- nobody on previous table, we can rotate

    -- calc next table number and name
    nr := cf.queue_cur_table + 1;
    if nr = cf.queue_ntables then
        nr := 0;
    end if;
    tbl := cf.queue_data_pfx || '_' || nr::text;

    -- there may be long lock on the table from pg_dump,
    -- detect it and skip rotate then
    begin
        execute 'lock table ' || tbl || ' nowait';
        execute 'truncate ' || tbl;
    exception
        when lock_not_available then
            -- cannot truncate, skipping rotate
            return 0;
    end;

    -- remember the moment
    update pgq.queue
        set queue_cur_table = nr,
            queue_switch_time = current_timestamp,
            queue_switch_step1 = txid_current(),
            queue_switch_step2 = NULL
        where queue_id = cf.queue_id;

    -- Clean ticks by using step2 txid from previous rotation.
    -- That should keep all ticks for all batches that are completely
    -- in old table.  This keeps them for longer than needed, but:
    -- 1. we want the pgq.tick table to be big, to avoid Postgres
    --    accidentally switching to seqscans on that.
    -- 2. that way we guarantee to consumers that they can be moved
    --    back on the queue at least for one rotation_period.
    --    (may help in disaster recovery)
    delete from pgq.tick
        where tick_queue = cf.queue_id
          and txid_snapshot_xmin(tick_snapshot) < cf.queue_switch_step2;

    return 0;
end;
$$ language plpgsql; -- need admin access

create or replace function pgq.maint_rotate_tables_step2()
returns integer as $$
-- ----------------------------------------------------------------------
-- Function: pgq.maint_rotate_tables_step2(0)
--
--      Stores the txid when the rotation was visible.  It should be
--      called in separate transaction than pgq.maint_rotate_tables_step1().
-- ----------------------------------------------------------------------
begin
    update pgq.queue
       set queue_switch_step2 = txid_current()
     where queue_switch_step2 is null;
    return 0;
end;
$$ language plpgsql; -- need admin access

create or replace function pgq.maint_tables_to_vacuum()
returns setof text as $$
-- ----------------------------------------------------------------------
-- Function: pgq.maint_tables_to_vacuum(0)
--
--      Returns list of tablenames that need frequent vacuuming.
--
--      The goal is to avoid hardcoding them into maintenance process.
--
-- Returns:
--      List of table names.
-- ----------------------------------------------------------------------
declare
    scm text;
    tbl text;
    fqname text;
begin
    -- assume autovacuum handles them fine
    if current_setting('autovacuum') = 'on' then
        return;
    end if;

    -- emit only tables that actually exist in this database
    for scm, tbl in values
        ('pgq', 'subscription'),
        ('pgq', 'consumer'),
        ('pgq', 'queue'),
        ('pgq', 'tick'),
        ('pgq', 'retry_queue'),
        ('pgq_ext', 'completed_tick'),
        ('pgq_ext', 'completed_batch'),
        ('pgq_ext', 'completed_event'),
        ('pgq_ext', 'partial_batch'),
        --('pgq_node', 'node_location'),
        --('pgq_node', 'node_info'),
        ('pgq_node', 'local_state'),
        --('pgq_node', 'subscriber_info'),
        --('londiste', 'table_info'),
        ('londiste', 'seq_info'),
        --('londiste', 'applied_execute'),
        --('londiste', 'pending_fkeys'),
        ('txid', 'epoch'),
        ('londiste', 'completed')
    loop
        select n.nspname || '.' || t.relname into fqname
            from pg_class t, pg_namespace n
            where n.oid = t.relnamespace
              and n.nspname = scm
              and t.relname = tbl;
        if found then
            return next fqname;
        end if;
    end loop;
    return;
end;
$$ language plpgsql;

create or replace function pgq.maint_operations(out func_name text, out func_arg text)
returns setof record as $$
-- ----------------------------------------------------------------------
-- Function: pgq.maint_operations(0)
--
--      Returns list of functions to call for maintenance.
--
--      The goal is to avoid hardcoding them into maintenance process.
--
-- Function signature:
--      Function should take either 1 or 0 arguments and return 1 if it wants
--      to be called immediately again, 0 if not.
--
-- Returns:
--      func_name   - Function to call
--      func_arg    - Optional argument to function (queue name)
-- ----------------------------------------------------------------------
declare
    ops text[];
    nrot int4;
begin
    -- rotate step 1
    nrot := 0;
    func_name := 'pgq.maint_rotate_tables_step1';
    for func_arg in
        select queue_name from pgq.queue
            where queue_rotation_period is not null
              and queue_switch_step2 is not null
              and queue_switch_time + queue_rotation_period < current_timestamp
            order by 1
    loop
        nrot := nrot + 1;
        return next;
    end loop;

    -- rotate step 2 (only needed if step 1 was scheduled)
    if nrot > 0 then
        func_name := 'pgq.maint_rotate_tables_step2';
        func_arg := NULL;
        return next;
    end if;

    -- check if extra field exists
    perform 1 from pg_attribute
      where attrelid = 'pgq.queue'::regclass
        and attname = 'queue_extra_maint';
    if found then
        -- add extra ops
        for func_arg, ops in
            select q.queue_name, queue_extra_maint from pgq.queue q
             where queue_extra_maint is not null
             order by 1
        loop
            for i in array_lower(ops, 1) .. array_upper(ops, 1)
            loop
                func_name = ops[i];
                return next;
            end loop;
        end loop;
    end if;

    -- vacuum tables
    func_name := 'vacuum';
    for func_arg in
        select * from pgq.maint_tables_to_vacuum()
    loop
        return next;
    end loop;

    --
    -- pgq_node & londiste
    --
    -- although they belong to queue_extra_maint,
    -- they are common enough so its more effective to handle them here.
    --
    perform 1 from pg_proc p, pg_namespace n
      where p.pronamespace = n.oid
        and n.nspname = 'pgq_node'
        and p.proname = 'maint_watermark';
    if found then
        func_name := 'pgq_node.maint_watermark';
        for func_arg in
            select n.queue_name
              from pgq_node.node_info n
              where n.node_type = 'root'
        loop
            return next;
        end loop;
    end if;

    perform 1 from pg_proc p, pg_namespace n
      where p.pronamespace = n.oid
        and n.nspname = 'londiste'
        and p.proname = 'root_check_seqs';
    if found then
        func_name := 'londiste.root_check_seqs';
        for func_arg in
            select distinct s.queue_name
              from londiste.seq_info s, pgq_node.node_info n
              where s.local
                and n.node_type = 'root'
                and n.queue_name = s.queue_name
        loop
            return next;
        end loop;
    end if;

    return;
end;
$$ language plpgsql;

-- Group: Random utility functions

create or replace function pgq.grant_perms(x_queue_name text)
returns integer as $$
-- ----------------------------------------------------------------------
-- Function: pgq.grant_perms(1)
--
--      Make event tables readable by public.
--
-- Parameters:
--      x_queue_name        - Name of the queue.
--
-- Returns:
--      nothing
-- ----------------------------------------------------------------------
declare
    q           record;
    i           integer;
    tbl_perms   text;
    seq_perms   text;
begin
    select * from pgq.queue into q
        where queue_name = x_queue_name;
    if not found then
        raise exception 'Queue not found';
    end if;

    if true then
        -- safe, all access must go via functions
        seq_perms := 'select';
        tbl_perms := 'select';
    else
        -- allow ordinery users to directly insert
        -- to event tables.  dangerous.
        seq_perms := 'select, update';
        tbl_perms := 'select, insert';
    end if;

    -- tick seq, normal users don't need to modify it
    execute 'grant ' || seq_perms
        || ' on ' || q.queue_tick_seq || ' to public';

    -- event seq
    execute 'grant ' || seq_perms
        || ' on ' || q.queue_event_seq || ' to public';

    -- parent table for events
    execute 'grant select on ' || q.queue_data_pfx || ' to public';

    -- real event tables
    for i in 0 ..
q.queue_ntables - 1
    loop
        execute 'grant ' || tbl_perms
            || ' on ' || q.queue_data_pfx || '_' || i::text
            || ' to public';
    end loop;

    return 1;
end;
$$ language plpgsql security definer;

create or replace function pgq.tune_storage(i_queue_name text)
returns integer as $$
-- ----------------------------------------------------------------------
-- Function: pgq.tune_storage(1)
--
--      Tunes storage settings for queue data tables
--      (disable autovacuum, max out fillfactor -- the tables are
--      append-only and get truncated on rotation).
-- ----------------------------------------------------------------------
declare
    tbl     text;
    tbloid  oid;
    q       record;
    i       int4;
    sql     text;
    pgver   int4;
begin
    pgver := current_setting('server_version_num');

    select * into q
      from pgq.queue where queue_name = i_queue_name;
    if not found then
        return 0;
    end if;

    for i in 0 .. (q.queue_ntables - 1) loop
        tbl := q.queue_data_pfx || '_' || i::text;

        -- set fillfactor
        sql := 'alter table ' || tbl || ' set (fillfactor = 100';

        -- autovacuum for 8.4+
        if pgver >= 80400 then
            sql := sql || ', autovacuum_enabled=off, toast.autovacuum_enabled =off';
        end if;
        sql := sql || ')';
        execute sql;

        -- autovacuum for 8.3 (pg_autovacuum catalog was removed in 8.4)
        if pgver < 80400 then
            tbloid := tbl::regclass::oid;
            delete from pg_catalog.pg_autovacuum where vacrelid = tbloid;
            insert into pg_catalog.pg_autovacuum
            values (tbloid, false, -1,-1,-1,-1,-1,-1,-1,-1);
        end if;
    end loop;

    return 1;
end;
$$ language plpgsql strict;

create or replace function pgq.force_tick(i_queue_name text)
returns bigint as $$
-- ----------------------------------------------------------------------
-- Function: pgq.force_tick(2)
--
--      Simulate lots of events happening to force ticker to tick.
--
--      Should be called in loop, with some delay until last tick
--      changes or too much time is passed.
--
--      Such function is needed because parallel calls of pgq.ticker() are
--      dangerous, and cannot be protected with locks as snapshot
--      is taken before locking.
--
-- Parameters:
--      i_queue_name     - Name of the queue
--
-- Returns:
--      Currently last tick id.
-- ----------------------------------------------------------------------
declare
    q  record;
    t  record;
begin
    -- bump seq and get queue id
    select queue_id,
           setval(queue_event_seq,
                  nextval(queue_event_seq)
                  + queue_ticker_max_count * 2 + 1000) as tmp
      into q from pgq.queue
     where queue_name = i_queue_name
       and not queue_external_ticker
       and not queue_ticker_paused;
    --if not found then
    --    raise notice 'queue not found or ticks not allowed';
    --end if;

    -- return last tick id
    select tick_id into t
      from pgq.tick, pgq.queue
     where tick_queue = queue_id
       and queue_name = i_queue_name
     order by tick_queue desc, tick_id desc
     limit 1;

    return t.tick_id;
end;
$$ language plpgsql security definer;

create or replace function pgq.seq_getval(i_seq_name text)
returns bigint as $$
-- ----------------------------------------------------------------------
-- Function: pgq.seq_getval(1)
--
--      Read current last_val from seq, without affecting it.
--
-- Parameters:
--      i_seq_name     - Name of the sequence
--
-- Returns:
--      last value.
-- ----------------------------------------------------------------------
declare
    res     int8;
    fqname  text;
    pos     integer;
    s       text;
    n       text;
begin
    -- split optional "schema.name" and quote both parts
    pos := position('.' in i_seq_name);
    if pos > 0 then
        s := substring(i_seq_name for pos - 1);
        n := substring(i_seq_name from pos + 1);
    else
        s := 'public';
        n := i_seq_name;
    end if;
    fqname := quote_ident(s) || '.' || quote_ident(n);

    execute 'select last_value from ' || fqname into res;
    return res;
end;
$$ language plpgsql;

create or replace function pgq.seq_setval(i_seq_name text, i_new_value int8)
returns bigint as $$
-- ----------------------------------------------------------------------
-- Function: pgq.seq_setval(2)
--
--      Like setval() but does not allow going back.
--
-- Parameters:
--      i_seq_name      - Name of the sequence
--      i_new_value     - new value
--
-- Returns:
--      current last value.
-- ----------------------------------------------------------------------
declare
    res     int8;
    fqname  text;
    pos     integer;
    s       text;
    n       text;
begin
    -- split optional "schema.name" and quote both parts
    pos := position('.' in i_seq_name);
    if pos > 0 then
        s := substring(i_seq_name for pos - 1);
        n := substring(i_seq_name from pos + 1);
    else
        s := 'public';
        n := i_seq_name;
    end if;
    fqname := quote_ident(s) || '.' || quote_ident(n);

    res := pgq.seq_getval(i_seq_name);
    if res < i_new_value then
        perform setval(fqname, i_new_value);
        return i_new_value;
    end if;
    return res;
end;
$$ language plpgsql;

-- Section: Public Functions

-- Group: Queue creation

create or replace function pgq.create_queue(i_queue_name text)
returns integer as $$
-- ----------------------------------------------------------------------
-- Function: pgq.create_queue(1)
--
--      Creates new queue with given name.
--
-- Returns:
--      0 - queue already exists
--      1 - queue created
-- ----------------------------------------------------------------------
declare
    tblpfx   text;
    tblname  text;
    idxpfx   text;
    idxname  text;
    sql      text;
    id       integer;
    tick_seq text;
    ev_seq   text;
    n_tables integer;
begin
    if i_queue_name is null then
        raise exception 'Invalid NULL value';
    end if;

    -- check if exists
    perform 1 from pgq.queue where queue_name = i_queue_name;
    if found then
        return 0;
    end if;

    -- insert event
    id := nextval('pgq.queue_queue_id_seq');
    tblpfx := 'pgq.event_' || id::text;
    idxpfx := 'event_' || id::text;
    tick_seq := 'pgq.event_' || id::text || '_tick_seq';
    ev_seq := 'pgq.event_' || id::text || '_id_seq';
    insert into pgq.queue (queue_id, queue_name,
            queue_data_pfx, queue_event_seq, queue_tick_seq)
        values (id, i_queue_name, tblpfx, ev_seq, tick_seq);

    select queue_ntables into n_tables from pgq.queue
        where queue_id = id;

    -- create seqs
    execute 'CREATE SEQUENCE ' || tick_seq;
    execute 'CREATE SEQUENCE ' || ev_seq;

    -- create data tables
    execute 'CREATE TABLE ' || tblpfx || ' () '
            || ' INHERITS (pgq.event_template)';
    for i in 0 ..
(n_tables - 1) loop tblname := tblpfx || '_' || i::text; idxname := idxpfx || '_' || i::text; execute 'CREATE TABLE ' || tblname || ' () ' || ' INHERITS (' || tblpfx || ')'; execute 'ALTER TABLE ' || tblname || ' ALTER COLUMN ev_id ' || ' SET DEFAULT nextval(' || quote_literal(ev_seq) || ')'; execute 'create index ' || idxname || '_txid_idx on ' || tblname || ' (ev_txid)'; end loop; perform pgq.grant_perms(i_queue_name); perform pgq.ticker(i_queue_name); perform pgq.tune_storage(i_queue_name); return 1; end; $$ language plpgsql security definer; create or replace function pgq.drop_queue(x_queue_name text, x_force bool) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.drop_queue(2) -- -- Drop queue and all associated tables. -- -- Parameters: -- x_queue_name - queue name -- x_force - ignore consumers -- ---------------------------------------------------------------------- declare tblname text; q record; num integer; begin -- check if exists select * into q from pgq.queue where queue_name = x_queue_name for update; if not found then raise exception 'No such event queue'; end if; if x_force then perform pgq.unregister_consumer(queue_name, consumer_name) from pgq.get_consumer_info(x_queue_name); else -- check if no consumers select count(*) into num from pgq.subscription where sub_queue = q.queue_id; if num > 0 then raise exception 'cannot drop queue, consumers still attached'; end if; end if; -- drop data tables for i in 0 .. (q.queue_ntables - 1) loop tblname := q.queue_data_pfx || '_' || i::text; execute 'DROP TABLE ' || tblname; end loop; execute 'DROP TABLE ' || q.queue_data_pfx; -- delete ticks delete from pgq.tick where tick_queue = q.queue_id; -- drop seqs -- FIXME: any checks needed here? 
execute 'DROP SEQUENCE ' || q.queue_tick_seq; execute 'DROP SEQUENCE ' || q.queue_event_seq; -- delete event delete from pgq.queue where queue_name = x_queue_name; return 1; end; $$ language plpgsql security definer; create or replace function pgq.drop_queue(x_queue_name text) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.drop_queue(1) -- -- Drop queue and all associated tables. -- No consumers must be listening on the queue. -- -- ---------------------------------------------------------------------- begin return pgq.drop_queue(x_queue_name, false); end; $$ language plpgsql strict; create or replace function pgq.set_queue_config( x_queue_name text, x_param_name text, x_param_value text) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.set_queue_config(3) -- -- -- Set configuration for specified queue. -- -- Parameters: -- x_queue_name - Name of the queue to configure. -- x_param_name - Configuration parameter name. -- x_param_value - Configuration parameter value. -- -- Returns: -- 0 if event was already in queue, 1 otherwise. 
-- ----------------------------------------------------------------------
declare
    v_param_name    text;
begin
    -- discard NULL input
    if x_queue_name is null or x_param_name is null then
        raise exception 'Invalid NULL value';
    end if;

    -- check if queue exists
    perform 1 from pgq.queue where queue_name = x_queue_name;
    if not found then
        raise exception 'No such event queue';
    end if;

    -- check if valid parameter name
    v_param_name := 'queue_' || x_param_name;
    if v_param_name not in (
        'queue_ticker_max_count', 'queue_ticker_max_lag',
        'queue_ticker_idle_period', 'queue_ticker_paused',
        'queue_rotation_period', 'queue_external_ticker')
    then
        -- BUGFIX: plpgsql RAISE uses a bare % as the placeholder;
        -- the old "%s" printed the value followed by a stray literal 's'
        raise exception 'cannot change parameter "%"', x_param_name;
    end if;

    -- param name was validated against the whitelist above, so it is safe
    -- to splice into dynamic SQL; the value is quoted with quote_literal()
    execute 'update pgq.queue set '
        || v_param_name || ' = ' || quote_literal(x_param_value)
        || ' where queue_name = ' || quote_literal(x_queue_name);

    return 1;
end;
$$ language plpgsql security definer;


-- Group: Event publishing

create or replace function pgq.insert_event(queue_name text, ev_type text, ev_data text)
returns bigint as $$
-- ----------------------------------------------------------------------
-- Function: pgq.insert_event(3)
--
--      Insert an event into queue.  Convenience wrapper around
--      pgq.insert_event(7) with the extra fields left NULL.
--
-- Parameters:
--      queue_name      - Name of the queue
--      ev_type         - User-specified type for the event
--      ev_data         - User data for the event
--
-- Returns:
--      Event ID
-- ----------------------------------------------------------------------
begin
    return pgq.insert_event(queue_name, ev_type, ev_data, null, null, null, null);
end;
$$ language plpgsql security definer;


create or replace function pgq.insert_event(
    queue_name text, ev_type text, ev_data text,
    ev_extra1 text, ev_extra2 text, ev_extra3 text, ev_extra4 text)
returns bigint as $$
-- ----------------------------------------------------------------------
-- Function: pgq.insert_event(7)
--
--      Insert an event into queue with all the extra fields.
-- -- Parameters: -- queue_name - Name of the queue -- ev_type - User-specified type for the event -- ev_data - User data for the event -- ev_extra1 - Extra data field for the event -- ev_extra2 - Extra data field for the event -- ev_extra3 - Extra data field for the event -- ev_extra4 - Extra data field for the event -- -- Returns: -- Event ID -- ---------------------------------------------------------------------- begin return pgq.insert_event_raw(queue_name, null, now(), null, null, ev_type, ev_data, ev_extra1, ev_extra2, ev_extra3, ev_extra4); end; $$ language plpgsql security definer; create or replace function pgq.current_event_table(x_queue_name text) returns text as $$ -- ---------------------------------------------------------------------- -- Function: pgq.current_event_table(1) -- -- Return active event table for particular queue. -- Event can be added to it without going via functions, -- e.g. by COPY. -- -- Note: -- The result is valid only during current transaction. -- -- Permissions: -- Actual insertion requires superuser access. -- -- Parameters: -- x_queue_name - Queue name. -- ---------------------------------------------------------------------- declare res text; disabled boolean; begin select queue_data_pfx || '_' || queue_cur_table::text, queue_disable_insert into res, disabled from pgq.queue where queue_name = x_queue_name; if not found then raise exception 'Event queue not found'; end if; if disabled then if current_setting('session_replication_role') <> 'replica' then raise exception 'Writing to queue disabled'; end if; end if; return res; end; $$ language plpgsql; -- no perms needed -- Group: Subscribing to queue create or replace function pgq.register_consumer( x_queue_name text, x_consumer_id text) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.register_consumer(2) -- -- Subscribe consumer on a queue. 
-- -- From this moment forward, consumer will see all events in the queue. -- -- Parameters: -- x_queue_name - Name of queue -- x_consumer_name - Name of consumer -- -- Returns: -- 0 - if already registered -- 1 - if new registration -- ---------------------------------------------------------------------- begin return pgq.register_consumer_at(x_queue_name, x_consumer_id, NULL); end; $$ language plpgsql security definer; create or replace function pgq.register_consumer_at( x_queue_name text, x_consumer_name text, x_tick_pos bigint) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.register_consumer(3) -- -- Extended registration, allows to specify tick_id. -- -- Note: -- For usage in special situations. -- -- Parameters: -- x_queue_name - Name of a queue -- x_consumer_name - Name of consumer -- x_tick_pos - Tick ID -- -- Returns: -- 0/1 whether consumer has already registered. -- ---------------------------------------------------------------------- declare tmp text; last_tick bigint; x_queue_id integer; x_consumer_id integer; queue integer; sub record; begin select queue_id into x_queue_id from pgq.queue where queue_name = x_queue_name; if not found then raise exception 'Event queue not created yet'; end if; -- get consumer and create if new select co_id into x_consumer_id from pgq.consumer where co_name = x_consumer_name; if not found then insert into pgq.consumer (co_name) values (x_consumer_name); x_consumer_id := currval('pgq.consumer_co_id_seq'); end if; -- if particular tick was requested, check if it exists if x_tick_pos is not null then perform 1 from pgq.tick where tick_queue = x_queue_id and tick_id = x_tick_pos; if not found then raise exception 'cannot reposition, tick not found: %', x_tick_pos; end if; end if; -- check if already registered select sub_last_tick, sub_batch into sub from pgq.subscription where sub_consumer = x_consumer_id and sub_queue = x_queue_id; if found then if 
x_tick_pos is not null then if sub.sub_batch is not null then raise exception 'reposition while active not allowed'; end if; -- update tick pos if requested update pgq.subscription set sub_last_tick = x_tick_pos, sub_active = now() where sub_consumer = x_consumer_id and sub_queue = x_queue_id; end if; -- already registered return 0; end if; -- new registration if x_tick_pos is null then -- start from current tick select tick_id into last_tick from pgq.tick where tick_queue = x_queue_id order by tick_queue desc, tick_id desc limit 1; if not found then raise exception 'No ticks for this queue. Please run ticker on database.'; end if; else last_tick := x_tick_pos; end if; -- register insert into pgq.subscription (sub_queue, sub_consumer, sub_last_tick) values (x_queue_id, x_consumer_id, last_tick); return 1; end; $$ language plpgsql security definer; create or replace function pgq.unregister_consumer( x_queue_name text, x_consumer_name text) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.unregister_consumer(2) -- -- Unsubscriber consumer from the queue. Also consumer's -- retry events are deleted. 
-- -- Parameters: -- x_queue_name - Name of the queue -- x_consumer_name - Name of the consumer -- -- Returns: -- nothing -- ---------------------------------------------------------------------- declare x_sub_id integer; _sub_id_cnt integer; begin select sub_id into x_sub_id from pgq.subscription, pgq.consumer, pgq.queue where sub_queue = queue_id and sub_consumer = co_id and queue_name = x_queue_name and co_name = x_consumer_name; if not found then raise exception 'consumer not registered on queue'; end if; -- subconsumer-aware select count(*) into _sub_id_cnt from pgq.subscription where sub_id = x_sub_id; if _sub_id_cnt > 1 then raise exception 'unregistered subconsumers detected'; end if; delete from pgq.retry_queue where ev_owner = x_sub_id; delete from pgq.subscription where sub_id = x_sub_id; return 1; end; $$ language plpgsql security definer; -- Group: Batch processing create or replace function pgq.next_batch_info( in i_queue_name text, in i_consumer_name text, out batch_id int8, out cur_tick_id int8, out prev_tick_id int8, out cur_tick_time timestamptz, out prev_tick_time timestamptz, out cur_tick_event_seq int8, out prev_tick_event_seq int8) as $$ -- ---------------------------------------------------------------------- -- Function: pgq.next_batch_info(2) -- -- Makes next block of events active. -- -- If it returns NULL, there is no events available in queue. -- Consumer should sleep then. -- -- The values from event_id sequence may give hint how big the -- batch may be. But they are inexact, they do not give exact size. -- Client *MUST NOT* use them to detect whether the batch contains any -- events at all - the values are unfit for that purpose. -- -- Parameters: -- i_queue_name - Name of the queue -- i_consumer_name - Name of the consumer -- -- Returns: -- batch_id - Batch ID or NULL if there are no more events available. -- cur_tick_id - End tick id. -- cur_tick_time - End tick time. 
-- cur_tick_event_seq - Value from event id sequence at the time tick was issued. -- prev_tick_id - Start tick id. -- prev_tick_time - Start tick time. -- prev_tick_event_seq - value from event id sequence at the time tick was issued. -- ---------------------------------------------------------------------- begin select f.batch_id, f.cur_tick_id, f.prev_tick_id, f.cur_tick_time, f.prev_tick_time, f.cur_tick_event_seq, f.prev_tick_event_seq into batch_id, cur_tick_id, prev_tick_id, cur_tick_time, prev_tick_time, cur_tick_event_seq, prev_tick_event_seq from pgq.next_batch_custom(i_queue_name, i_consumer_name, NULL, NULL, NULL) f; return; end; $$ language plpgsql; drop function pgq.next_batch(text, text); create or replace function pgq.next_batch( in i_queue_name text, in i_consumer_name text) returns int8 as $$ -- ---------------------------------------------------------------------- -- Function: pgq.next_batch(2) -- -- Old function that returns just batch_id. -- -- Parameters: -- i_queue_name - Name of the queue -- i_consumer_name - Name of the consumer -- -- Returns: -- Batch ID or NULL if there are no more events available. -- ---------------------------------------------------------------------- declare res int8; begin select batch_id into res from pgq.next_batch_info(i_queue_name, i_consumer_name); return res; end; $$ language plpgsql; create or replace function pgq.next_batch_custom( in i_queue_name text, in i_consumer_name text, in i_min_lag interval, in i_min_count int4, in i_min_interval interval, out batch_id int8, out cur_tick_id int8, out prev_tick_id int8, out cur_tick_time timestamptz, out prev_tick_time timestamptz, out cur_tick_event_seq int8, out prev_tick_event_seq int8) as $$ -- ---------------------------------------------------------------------- -- Function: pgq.next_batch_custom(5) -- -- Makes next block of events active. Block size can be tuned -- with i_min_count, i_min_interval parameters. Events age can -- be tuned with i_min_lag. 
-- -- If it returns NULL, there is no events available in queue. -- Consumer should sleep then. -- -- The values from event_id sequence may give hint how big the -- batch may be. But they are inexact, they do not give exact size. -- Client *MUST NOT* use them to detect whether the batch contains any -- events at all - the values are unfit for that purpose. -- -- Note: -- i_min_lag together with i_min_interval/i_min_count is inefficient. -- -- Parameters: -- i_queue_name - Name of the queue -- i_consumer_name - Name of the consumer -- i_min_lag - Consumer wants events older than that -- i_min_count - Consumer wants batch to contain at least this many events -- i_min_interval - Consumer wants batch to cover at least this much time -- -- Returns: -- batch_id - Batch ID or NULL if there are no more events available. -- cur_tick_id - End tick id. -- cur_tick_time - End tick time. -- cur_tick_event_seq - Value from event id sequence at the time tick was issued. -- prev_tick_id - Start tick id. -- prev_tick_time - Start tick time. -- prev_tick_event_seq - value from event id sequence at the time tick was issued. 
-- ---------------------------------------------------------------------- declare errmsg text; queue_id integer; sub_id integer; cons_id integer; begin select s.sub_queue, s.sub_consumer, s.sub_id, s.sub_batch, t1.tick_id, t1.tick_time, t1.tick_event_seq, t2.tick_id, t2.tick_time, t2.tick_event_seq into queue_id, cons_id, sub_id, batch_id, prev_tick_id, prev_tick_time, prev_tick_event_seq, cur_tick_id, cur_tick_time, cur_tick_event_seq from pgq.consumer c, pgq.queue q, pgq.subscription s left join pgq.tick t1 on (t1.tick_queue = s.sub_queue and t1.tick_id = s.sub_last_tick) left join pgq.tick t2 on (t2.tick_queue = s.sub_queue and t2.tick_id = s.sub_next_tick) where q.queue_name = i_queue_name and c.co_name = i_consumer_name and s.sub_queue = q.queue_id and s.sub_consumer = c.co_id; if not found then errmsg := 'Not subscriber to queue: ' || coalesce(i_queue_name, 'NULL') || '/' || coalesce(i_consumer_name, 'NULL'); raise exception '%', errmsg; end if; -- sanity check if prev_tick_id is null then raise exception 'PgQ corruption: Consumer % on queue % does not see tick %', i_consumer_name, i_queue_name, prev_tick_id; end if; -- has already active batch if batch_id is not null then return; end if; if i_min_interval is null and i_min_count is null then -- find next tick select tick_id, tick_time, tick_event_seq into cur_tick_id, cur_tick_time, cur_tick_event_seq from pgq.tick where tick_id > prev_tick_id and tick_queue = queue_id order by tick_queue asc, tick_id asc limit 1; else -- find custom tick select next_tick_id, next_tick_time, next_tick_seq into cur_tick_id, cur_tick_time, cur_tick_event_seq from pgq.find_tick_helper(queue_id, prev_tick_id, prev_tick_time, prev_tick_event_seq, i_min_count, i_min_interval); end if; if i_min_lag is not null then -- enforce min lag if now() - cur_tick_time < i_min_lag then cur_tick_id := NULL; cur_tick_time := NULL; cur_tick_event_seq := NULL; end if; end if; if cur_tick_id is null then -- nothing to do prev_tick_id := null; 
prev_tick_time := null; prev_tick_event_seq := null; return; end if; -- get next batch batch_id := nextval('pgq.batch_id_seq'); update pgq.subscription set sub_batch = batch_id, sub_next_tick = cur_tick_id, sub_active = now() where sub_queue = queue_id and sub_consumer = cons_id; return; end; $$ language plpgsql security definer; create or replace function pgq.get_batch_events( in x_batch_id bigint, out ev_id bigint, out ev_time timestamptz, out ev_txid bigint, out ev_retry int4, out ev_type text, out ev_data text, out ev_extra1 text, out ev_extra2 text, out ev_extra3 text, out ev_extra4 text) returns setof record as $$ -- ---------------------------------------------------------------------- -- Function: pgq.get_batch_events(1) -- -- Get all events in batch. -- -- Parameters: -- x_batch_id - ID of active batch. -- -- Returns: -- List of events. -- ---------------------------------------------------------------------- declare sql text; begin sql := pgq.batch_event_sql(x_batch_id); for ev_id, ev_time, ev_txid, ev_retry, ev_type, ev_data, ev_extra1, ev_extra2, ev_extra3, ev_extra4 in execute sql loop return next; end loop; return; end; $$ language plpgsql; -- no perms needed create or replace function pgq.get_batch_cursor( in i_batch_id bigint, in i_cursor_name text, in i_quick_limit int4, in i_extra_where text, out ev_id bigint, out ev_time timestamptz, out ev_txid bigint, out ev_retry int4, out ev_type text, out ev_data text, out ev_extra1 text, out ev_extra2 text, out ev_extra3 text, out ev_extra4 text) returns setof record as $$ -- ---------------------------------------------------------------------- -- Function: pgq.get_batch_cursor(4) -- -- Get events in batch using a cursor. -- -- Parameters: -- i_batch_id - ID of active batch. -- i_cursor_name - Name for new cursor -- i_quick_limit - Number of events to return immediately -- i_extra_where - optional where clause to filter events -- -- Returns: -- List of events. 
-- ---------------------------------------------------------------------- declare _cname text; _sql text; begin if i_batch_id is null or i_cursor_name is null or i_quick_limit is null then return; end if; _cname := quote_ident(i_cursor_name); _sql := pgq.batch_event_sql(i_batch_id); -- apply extra where if i_extra_where is not null then _sql := replace(_sql, ' order by 1', ''); _sql := 'select * from (' || _sql || ') _evs where ' || i_extra_where || ' order by 1'; end if; -- create cursor execute 'declare ' || _cname || ' no scroll cursor for ' || _sql; -- if no events wanted, don't bother with execute if i_quick_limit <= 0 then return; end if; -- return first block of events for ev_id, ev_time, ev_txid, ev_retry, ev_type, ev_data, ev_extra1, ev_extra2, ev_extra3, ev_extra4 in execute 'fetch ' || i_quick_limit::text || ' from ' || _cname loop return next; end loop; return; end; $$ language plpgsql; -- no perms needed create or replace function pgq.get_batch_cursor( in i_batch_id bigint, in i_cursor_name text, in i_quick_limit int4, out ev_id bigint, out ev_time timestamptz, out ev_txid bigint, out ev_retry int4, out ev_type text, out ev_data text, out ev_extra1 text, out ev_extra2 text, out ev_extra3 text, out ev_extra4 text) returns setof record as $$ -- ---------------------------------------------------------------------- -- Function: pgq.get_batch_cursor(3) -- -- Get events in batch using a cursor. -- -- Parameters: -- i_batch_id - ID of active batch. -- i_cursor_name - Name for new cursor -- i_quick_limit - Number of events to return immediately -- -- Returns: -- List of events. 
-- ----------------------------------------------------------------------
begin
    -- delegate to the 4-argument variant with no extra WHERE clause
    for ev_id, ev_time, ev_txid, ev_retry, ev_type, ev_data,
        ev_extra1, ev_extra2, ev_extra3, ev_extra4
        in
            select * from pgq.get_batch_cursor(i_batch_id,
                i_cursor_name, i_quick_limit, null)
    loop
        return next;
    end loop;
    return;
end;
$$ language plpgsql strict; -- no perms needed

create or replace function pgq.event_retry(
    x_batch_id bigint,
    x_event_id bigint,
    x_retry_time timestamptz)
returns integer as $$
-- ----------------------------------------------------------------------
-- Function: pgq.event_retry(3)
--
--     Put the event into retry queue, to be processed again later.
--
-- Parameters:
--      x_batch_id - ID of active batch.
--      x_event_id - event id
--      x_retry_time - Time when the event should be put back into queue
--
-- Returns:
--      1 - event was moved into the retry queue
--      0 - event was already in the retry queue (unique_violation),
--          which is treated as success
-- ----------------------------------------------------------------------
begin
    insert into pgq.retry_queue (ev_retry_after, ev_queue,
        ev_id, ev_time, ev_txid, ev_owner, ev_retry,
        ev_type, ev_data, ev_extra1, ev_extra2,
        ev_extra3, ev_extra4)
    select x_retry_time, sub_queue,
           ev_id, ev_time, NULL, sub_id, coalesce(ev_retry, 0) + 1,
           ev_type, ev_data, ev_extra1, ev_extra2,
           ev_extra3, ev_extra4
      from pgq.get_batch_events(x_batch_id),
           pgq.subscription
     where sub_batch = x_batch_id
       and ev_id = x_event_id;
    if not found then
        raise exception 'event not found';
    end if;
    return 1;

-- dont worry if the event is already in queue
exception
    when unique_violation then
        return 0;
end;
$$ language plpgsql security definer;

create or replace function pgq.event_retry(
    x_batch_id bigint,
    x_event_id bigint,
    x_retry_seconds integer)
returns integer as $$
-- ----------------------------------------------------------------------
-- Function: pgq.event_retry(3)
--
--     Put the event into retry queue, to be processed later again.
--
-- Parameters:
--      x_batch_id - ID of active batch.
-- x_event_id - event id -- x_retry_seconds - Time when the event should be put back into queue -- -- Returns: -- nothing -- ---------------------------------------------------------------------- declare new_retry timestamptz; begin new_retry := current_timestamp + ((x_retry_seconds::text || ' seconds')::interval); return pgq.event_retry(x_batch_id, x_event_id, new_retry); end; $$ language plpgsql security definer; create or replace function pgq.batch_retry( i_batch_id bigint, i_retry_seconds integer) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.batch_retry(2) -- -- Put whole batch into retry queue, to be processed again later. -- -- Parameters: -- i_batch_id - ID of active batch. -- i_retry_time - Time when the event should be put back into queue -- -- Returns: -- nothing -- ---------------------------------------------------------------------- declare _retry timestamptz; _cnt integer; _s record; begin _retry := current_timestamp + ((i_retry_seconds::text || ' seconds')::interval); select * into _s from pgq.subscription where sub_batch = i_batch_id; if not found then raise exception 'batch_retry: batch % not found', i_batch_id; end if; insert into pgq.retry_queue (ev_retry_after, ev_queue, ev_id, ev_time, ev_txid, ev_owner, ev_retry, ev_type, ev_data, ev_extra1, ev_extra2, ev_extra3, ev_extra4) select distinct _retry, _s.sub_queue, b.ev_id, b.ev_time, NULL::int8, _s.sub_id, coalesce(b.ev_retry, 0) + 1, b.ev_type, b.ev_data, b.ev_extra1, b.ev_extra2, b.ev_extra3, b.ev_extra4 from pgq.get_batch_events(i_batch_id) b left join pgq.retry_queue rq on (rq.ev_id = b.ev_id and rq.ev_owner = _s.sub_id and rq.ev_queue = _s.sub_queue) where rq.ev_id is null; GET DIAGNOSTICS _cnt = ROW_COUNT; return _cnt; end; $$ language plpgsql security definer; create or replace function pgq.finish_batch( x_batch_id bigint) returns integer as $$ -- ---------------------------------------------------------------------- -- 
Function: pgq.finish_batch(1) -- -- Closes a batch. No more operations can be done with events -- of this batch. -- -- Parameters: -- x_batch_id - id of batch. -- -- Returns: -- If batch 1 if batch was found, 0 otherwise. -- ---------------------------------------------------------------------- begin update pgq.subscription set sub_active = now(), sub_last_tick = sub_next_tick, sub_next_tick = null, sub_batch = null where sub_batch = x_batch_id; if not found then raise warning 'finish_batch: batch % not found', x_batch_id; return 0; end if; return 1; end; $$ language plpgsql security definer; -- Group: General info functions drop function if exists pgq.get_queue_info(); drop function if exists pgq.get_queue_info(text); create or replace function pgq.get_queue_info( out queue_name text, out queue_ntables integer, out queue_cur_table integer, out queue_rotation_period interval, out queue_switch_time timestamptz, out queue_external_ticker boolean, out queue_ticker_paused boolean, out queue_ticker_max_count integer, out queue_ticker_max_lag interval, out queue_ticker_idle_period interval, out ticker_lag interval, out ev_per_sec float8, out ev_new bigint, out last_tick_id bigint) returns setof record as $$ -- ---------------------------------------------------------------------- -- Function: pgq.get_queue_info(0) -- -- Get info about all queues. -- -- Returns: -- List of pgq.ret_queue_info records. 
-- ---------------------------------------------------------------------- begin for queue_name, queue_ntables, queue_cur_table, queue_rotation_period, queue_switch_time, queue_external_ticker, queue_ticker_paused, queue_ticker_max_count, queue_ticker_max_lag, queue_ticker_idle_period, ticker_lag, ev_per_sec, ev_new, last_tick_id in select f.queue_name, f.queue_ntables, f.queue_cur_table, f.queue_rotation_period, f.queue_switch_time, f.queue_external_ticker, f.queue_ticker_paused, f.queue_ticker_max_count, f.queue_ticker_max_lag, f.queue_ticker_idle_period, f.ticker_lag, f.ev_per_sec, f.ev_new, f.last_tick_id from pgq.get_queue_info(null) f loop return next; end loop; return; end; $$ language plpgsql; create or replace function pgq.get_queue_info( in i_queue_name text, out queue_name text, out queue_ntables integer, out queue_cur_table integer, out queue_rotation_period interval, out queue_switch_time timestamptz, out queue_external_ticker boolean, out queue_ticker_paused boolean, out queue_ticker_max_count integer, out queue_ticker_max_lag interval, out queue_ticker_idle_period interval, out ticker_lag interval, out ev_per_sec float8, out ev_new bigint, out last_tick_id bigint) returns setof record as $$ -- ---------------------------------------------------------------------- -- Function: pgq.get_queue_info(1) -- -- Get info about particular queue. -- -- Returns: -- One pgq.ret_queue_info record. 
-- ---------------------------------------------------------------------- declare _ticker_lag interval; _top_tick_id bigint; _ht_tick_id bigint; _top_tick_time timestamptz; _top_tick_event_seq bigint; _ht_tick_time timestamptz; _ht_tick_event_seq bigint; _queue_id integer; _queue_event_seq text; begin for queue_name, queue_ntables, queue_cur_table, queue_rotation_period, queue_switch_time, queue_external_ticker, queue_ticker_paused, queue_ticker_max_count, queue_ticker_max_lag, queue_ticker_idle_period, _queue_id, _queue_event_seq in select q.queue_name, q.queue_ntables, q.queue_cur_table, q.queue_rotation_period, q.queue_switch_time, q.queue_external_ticker, q.queue_ticker_paused, q.queue_ticker_max_count, q.queue_ticker_max_lag, q.queue_ticker_idle_period, q.queue_id, q.queue_event_seq from pgq.queue q where (i_queue_name is null or q.queue_name = i_queue_name) order by q.queue_name loop -- most recent tick select (current_timestamp - t.tick_time), tick_id, t.tick_time, t.tick_event_seq into ticker_lag, _top_tick_id, _top_tick_time, _top_tick_event_seq from pgq.tick t where t.tick_queue = _queue_id order by t.tick_queue desc, t.tick_id desc limit 1; -- slightly older tick select ht.tick_id, ht.tick_time, ht.tick_event_seq into _ht_tick_id, _ht_tick_time, _ht_tick_event_seq from pgq.tick ht where ht.tick_queue = _queue_id and ht.tick_id >= _top_tick_id - 20 order by ht.tick_queue asc, ht.tick_id asc limit 1; if _ht_tick_time < _top_tick_time then ev_per_sec = (_top_tick_event_seq - _ht_tick_event_seq) / extract(epoch from (_top_tick_time - _ht_tick_time)); else ev_per_sec = null; end if; ev_new = pgq.seq_getval(_queue_event_seq) - _top_tick_event_seq; last_tick_id = _top_tick_id; return next; end loop; return; end; $$ language plpgsql; create or replace function pgq.get_consumer_info( out queue_name text, out consumer_name text, out lag interval, out last_seen interval, out last_tick bigint, out current_batch bigint, out next_tick bigint, out pending_events 
bigint) returns setof record as $$ -- ---------------------------------------------------------------------- -- Function: pgq.get_consumer_info(0) -- -- Returns info about all consumers on all queues. -- -- Returns: -- See pgq.get_consumer_info(2) -- ---------------------------------------------------------------------- begin for queue_name, consumer_name, lag, last_seen, last_tick, current_batch, next_tick, pending_events in select f.queue_name, f.consumer_name, f.lag, f.last_seen, f.last_tick, f.current_batch, f.next_tick, f.pending_events from pgq.get_consumer_info(null, null) f loop return next; end loop; return; end; $$ language plpgsql security definer; create or replace function pgq.get_consumer_info( in i_queue_name text, out queue_name text, out consumer_name text, out lag interval, out last_seen interval, out last_tick bigint, out current_batch bigint, out next_tick bigint, out pending_events bigint) returns setof record as $$ -- ---------------------------------------------------------------------- -- Function: pgq.get_consumer_info(1) -- -- Returns info about all consumers on single queue. 
--
-- Returns:
--      See pgq.get_consumer_info(2)
-- ----------------------------------------------------------------------
begin
    -- delegate to the 2-argument variant with consumer filter left open
    for queue_name, consumer_name, lag, last_seen,
        last_tick, current_batch, next_tick, pending_events
    in
        select f.queue_name, f.consumer_name, f.lag, f.last_seen,
               f.last_tick, f.current_batch, f.next_tick, f.pending_events
            from pgq.get_consumer_info(i_queue_name, null) f
    loop
        return next;
    end loop;
    return;
end;
$$ language plpgsql security definer;

create or replace function pgq.get_consumer_info(
    in i_queue_name text,
    in i_consumer_name text,
    out queue_name text,
    out consumer_name text,
    out lag interval,
    out last_seen interval,
    out last_tick bigint,
    out current_batch bigint,
    out next_tick bigint,
    out pending_events bigint)
returns setof record as $$
-- ----------------------------------------------------------------------
-- Function: pgq.get_consumer_info(2)
--
--      Get info about particular consumer on particular queue.
--
-- Parameters:
--      i_queue_name            - name of a queue. (null = all)
--      i_consumer_name         - name of a consumer (null = all)
--
-- Returns:
--      queue_name      - Queue name
--      consumer_name   - Consumer name
--      lag             - How old are events the consumer is processing
--      last_seen       - How long ago the consumer was last seen by pgq
--      last_tick       - Tick ID of last processed tick
--      current_batch   - Current batch ID, if one is active or NULL
--      next_tick       - If batch is active, then its final tick.
--      pending_events  - Approximate number of events the consumer has not
--                        yet processed (derived from tick event-seq counters)
-- ---------------------------------------------------------------------- declare _pending_events bigint; _queue_id bigint; begin for queue_name, consumer_name, lag, last_seen, last_tick, current_batch, next_tick, _pending_events, _queue_id in select q.queue_name, c.co_name, current_timestamp - t.tick_time, current_timestamp - s.sub_active, s.sub_last_tick, s.sub_batch, s.sub_next_tick, t.tick_event_seq, q.queue_id from pgq.queue q, pgq.consumer c, pgq.subscription s left join pgq.tick t on (t.tick_queue = s.sub_queue and t.tick_id = s.sub_last_tick) where q.queue_id = s.sub_queue and c.co_id = s.sub_consumer and (i_queue_name is null or q.queue_name = i_queue_name) and (i_consumer_name is null or c.co_name = i_consumer_name) order by 1,2 loop select t.tick_event_seq - _pending_events into pending_events from pgq.tick t where t.tick_queue = _queue_id order by t.tick_queue desc, t.tick_id desc limit 1; return next; end loop; return; end; $$ language plpgsql security definer; create or replace function pgq.version() returns text as $$ -- ---------------------------------------------------------------------- -- Function: pgq.version(0) -- -- Returns verison string for pgq. ATM its SkyTools version -- that is only bumped when PGQ database code changes. -- ---------------------------------------------------------------------- begin return '3.0.0.9'; end; $$ language plpgsql; create or replace function pgq.get_batch_info( in x_batch_id bigint, out queue_name text, out consumer_name text, out batch_start timestamptz, out batch_end timestamptz, out prev_tick_id bigint, out tick_id bigint, out lag interval, out seq_start bigint, out seq_end bigint) as $$ -- ---------------------------------------------------------------------- -- Function: pgq.get_batch_info(1) -- -- Returns detailed info about a batch. -- -- Parameters: -- x_batch_id - id of a active batch. 
-- -- Returns: -- Info -- ---------------------------------------------------------------------- begin select q.queue_name, c.co_name, prev.tick_time, cur.tick_time, s.sub_last_tick, s.sub_next_tick, current_timestamp - cur.tick_time, prev.tick_event_seq, cur.tick_event_seq into queue_name, consumer_name, batch_start, batch_end, prev_tick_id, tick_id, lag, seq_start, seq_end from pgq.subscription s, pgq.tick cur, pgq.tick prev, pgq.queue q, pgq.consumer c where s.sub_batch = x_batch_id and prev.tick_id = s.sub_last_tick and prev.tick_queue = s.sub_queue and cur.tick_id = s.sub_next_tick and cur.tick_queue = s.sub_queue and q.queue_id = s.sub_queue and c.co_id = s.sub_consumer; return; end; $$ language plpgsql security definer; end; skytools-3.2.6/upgrade/final/pgq_core_2.1.13.sql0000644000000000000000000021024612426435645016161 0ustar -- ---------------------------------------------------------------------- -- Section: Internal Tables -- -- Overview: -- pgq.queue - Queue configuration -- pgq.consumer - Consumer names -- pgq.subscription - Consumer registrations -- pgq.tick - Per-queue snapshots (ticks) -- pgq.event_* - Data tables -- pgq.retry_queue - Events to be retried later -- pgq.failed_queue - Events whose processing failed -- -- Its basically generalized and simplified Slony-I structure: -- sl_node - pgq.consumer -- sl_set - pgq.queue -- sl_subscriber + sl_confirm - pgq.subscription -- sl_event - pgq.tick -- sl_setsync - pgq_ext.completed_* -- sl_log_* - slony1 has per-cluster data tables, -- pgq has per-queue data tables. 
-- ---------------------------------------------------------------------- set client_min_messages = 'warning'; set default_with_oids = 'off'; -- drop schema if exists pgq cascade; create schema pgq; -- ---------------------------------------------------------------------- -- Table: pgq.consumer -- -- Name to id lookup for consumers -- -- Columns: -- co_id - consumer's id for internal usage -- co_name - consumer's id for external usage -- ---------------------------------------------------------------------- create table pgq.consumer ( co_id serial, co_name text not null default 'fooz', constraint consumer_pkey primary key (co_id), constraint consumer_name_uq UNIQUE (co_name) ); -- ---------------------------------------------------------------------- -- Table: pgq.queue -- -- Information about available queues -- -- Columns: -- queue_id - queue id for internal usage -- queue_name - queue name visible outside -- queue_ntables - how many data tables the queue has -- queue_cur_table - which data table is currently active -- queue_rotation_period - period for data table rotation -- queue_switch_step1 - tx when rotation happened -- queue_switch_step2 - tx after rotation was committed -- queue_switch_time - time when switch happened -- queue_external_ticker - ticks come from some external sources -- queue_ticker_max_count - batch should not contain more events -- queue_ticker_max_lag - events should not age more -- queue_ticker_idle_period - how often to tick when no events happen -- queue_data_pfx - prefix for data table names -- queue_event_seq - sequence for event id's -- queue_tick_seq - sequence for tick id's -- ---------------------------------------------------------------------- create table pgq.queue ( queue_id serial, queue_name text not null, queue_ntables integer not null default 3, queue_cur_table integer not null default 0, queue_rotation_period interval not null default '2 hours', queue_switch_step1 bigint not null default txid_current(), 
queue_switch_step2 bigint default txid_current(), queue_switch_time timestamptz not null default now(), queue_external_ticker boolean not null default false, queue_ticker_max_count integer not null default 500, queue_ticker_max_lag interval not null default '3 seconds', queue_ticker_idle_period interval not null default '1 minute', queue_data_pfx text not null, queue_event_seq text not null, queue_tick_seq text not null, constraint queue_pkey primary key (queue_id), constraint queue_name_uq unique (queue_name) ); -- ---------------------------------------------------------------------- -- Table: pgq.tick -- -- Snapshots for event batching -- -- Columns: -- tick_queue - queue id whose tick it is -- tick_id - ticks id (per-queue) -- tick_time - time when tick happened -- tick_snapshot - transaction state -- ---------------------------------------------------------------------- create table pgq.tick ( tick_queue int4 not null, tick_id bigint not null, tick_time timestamptz not null default now(), tick_snapshot txid_snapshot not null default txid_current_snapshot(), constraint tick_pkey primary key (tick_queue, tick_id), constraint tick_queue_fkey foreign key (tick_queue) references pgq.queue (queue_id) ); -- ---------------------------------------------------------------------- -- Sequence: pgq.batch_id_seq -- -- Sequence for batch id's. -- ---------------------------------------------------------------------- create sequence pgq.batch_id_seq; -- ---------------------------------------------------------------------- -- Table: pgq.subscription -- -- Consumer registration on a queue. 
-- -- Columns: -- -- sub_id - subscription id for internal usage -- sub_queue - queue id -- sub_consumer - consumer's id -- sub_last_tick - last tick the consumer processed -- sub_batch - shortcut for queue_id/consumer_id/tick_id -- sub_next_tick - batch end pos -- ---------------------------------------------------------------------- create table pgq.subscription ( sub_id serial not null, sub_queue int4 not null, sub_consumer int4 not null, sub_last_tick bigint not null, sub_active timestamptz not null default now(), sub_batch bigint, sub_next_tick bigint, constraint subscription_pkey primary key (sub_id), constraint subscription_ukey unique (sub_queue, sub_consumer), constraint sub_queue_fkey foreign key (sub_queue) references pgq.queue (queue_id), constraint sub_consumer_fkey foreign key (sub_consumer) references pgq.consumer (co_id) ); -- ---------------------------------------------------------------------- -- Table: pgq.event_template -- -- Parent table for all event tables -- -- Columns: -- ev_id - event's id, supposed to be unique per queue -- ev_time - when the event was inserted -- ev_txid - transaction id which inserted the event -- ev_owner - subscription id that wanted to retry this -- ev_retry - how many times the event has been retried, NULL for new events -- ev_type - consumer/producer can specify what the data fields contain -- ev_data - data field -- ev_extra1 - extra data field -- ev_extra2 - extra data field -- ev_extra3 - extra data field -- ev_extra4 - extra data field -- ---------------------------------------------------------------------- create table pgq.event_template ( ev_id bigint not null, ev_time timestamptz not null, ev_txid bigint not null default txid_current(), ev_owner int4, ev_retry int4, ev_type text, ev_data text, ev_extra1 text, ev_extra2 text, ev_extra3 text, ev_extra4 text ); -- ---------------------------------------------------------------------- -- Table: pgq.retry_queue -- -- Events to be retried. 
When retry time reaches, they will -- be put back into main queue. -- -- Columns: -- ev_retry_after - time when it should be re-inserted to main queue -- * - same as pgq.event_template -- ---------------------------------------------------------------------- create table pgq.retry_queue ( ev_retry_after timestamptz not null, like pgq.event_template, constraint rq_pkey primary key (ev_owner, ev_id), constraint rq_owner_fkey foreign key (ev_owner) references pgq.subscription (sub_id) ); alter table pgq.retry_queue alter column ev_owner set not null; alter table pgq.retry_queue alter column ev_txid drop not null; create index rq_retry_idx on pgq.retry_queue (ev_retry_after); create index rq_retry_owner_idx on pgq.retry_queue (ev_owner, ev_id); -- ---------------------------------------------------------------------- -- Table: pgq.failed_queue -- -- Events whose processing failed. -- -- Columns: -- ev_failed_reason - consumer's excuse for not processing -- ev_failed_time - when it was tagged failed -- * - same as pgq.event_template -- ---------------------------------------------------------------------- create table pgq.failed_queue ( ev_failed_reason text, ev_failed_time timestamptz not null, -- all event fields like pgq.event_template, constraint fq_pkey primary key (ev_owner, ev_id), constraint fq_owner_fkey foreign key (ev_owner) references pgq.subscription (sub_id) ); alter table pgq.failed_queue alter column ev_owner set not null; alter table pgq.failed_queue alter column ev_txid drop not null; grant usage on schema pgq to public; grant select on table pgq.consumer to public; grant select on table pgq.queue to public; grant select on table pgq.tick to public; grant select on table pgq.queue to public; grant select on table pgq.subscription to public; grant select on table pgq.event_template to public; grant select on table pgq.retry_queue to public; grant select on table pgq.failed_queue to public; create type pgq.ret_queue_info as ( queue_name text, 
queue_ntables integer, queue_cur_table integer, queue_rotation_period interval, queue_switch_time timestamptz, queue_external_ticker boolean, queue_ticker_max_count integer, queue_ticker_max_lag interval, queue_ticker_idle_period interval, ticker_lag interval ); create type pgq.ret_consumer_info as ( queue_name text, consumer_name text, lag interval, last_seen interval, last_tick bigint, current_batch bigint, next_tick bigint ); create type pgq.ret_batch_info as ( queue_name text, consumer_name text, batch_start timestamptz, batch_end timestamptz, prev_tick_id bigint, tick_id bigint, lag interval ); create type pgq.ret_batch_event as ( ev_id bigint, ev_time timestamptz, ev_txid bigint, ev_retry int4, ev_type text, ev_data text, ev_extra1 text, ev_extra2 text, ev_extra3 text, ev_extra4 text ); -- Section: Internal Functions -- Group: Low-level event handling create or replace function pgq.batch_event_sql(x_batch_id bigint) returns text as $$ -- ---------------------------------------------------------------------- -- Function: pgq.batch_event_sql(1) -- Creates SELECT statement that fetches events for this batch. -- -- Parameters: -- x_batch_id - ID of a active batch. -- -- Returns: -- SQL statement. -- ---------------------------------------------------------------------- -- ---------------------------------------------------------------------- -- Algorithm description: -- Given 2 snapshots, sn1 and sn2 with sn1 having xmin1, xmax1 -- and sn2 having xmin2, xmax2 create expression that filters -- right txid's from event table. -- -- Simplest solution would be -- > WHERE ev_txid >= xmin1 AND ev_txid <= xmax2 -- > AND NOT txid_visible_in_snapshot(ev_txid, sn1) -- > AND txid_visible_in_snapshot(ev_txid, sn2) -- -- The simple solution has a problem with long transactions (xmin1 very low). -- All the batches that happen when the long tx is active will need -- to scan all events in that range. Here is 2 optimizations used: -- -- 1) Use [xmax1..xmax2] for range scan. 
That limits the range to -- txids that actually happened between two snapshots. For txids -- in the range [xmin1..xmax1] look which ones were actually -- committed between snapshots and search for them using exact -- values using IN (..) list. -- -- 2) As most TX are short, there could be lot of them that were -- just below xmax1, but were committed before xmax2. So look -- if there are ID's near xmax1 and lower the range to include -- them, thus decresing size of IN (..) list. -- ---------------------------------------------------------------------- declare rec record; sql text; tbl text; arr text; part text; select_fields text; retry_expr text; batch record; begin select s.sub_last_tick, s.sub_next_tick, s.sub_id, s.sub_queue, txid_snapshot_xmax(last.tick_snapshot) as tx_start, txid_snapshot_xmax(cur.tick_snapshot) as tx_end, last.tick_snapshot as last_snapshot, cur.tick_snapshot as cur_snapshot into batch from pgq.subscription s, pgq.tick last, pgq.tick cur where s.sub_batch = x_batch_id and last.tick_queue = s.sub_queue and last.tick_id = s.sub_last_tick and cur.tick_queue = s.sub_queue and cur.tick_id = s.sub_next_tick; if not found then raise exception 'batch not found'; end if; -- load older transactions arr := ''; for rec in -- active tx-es in prev_snapshot that were committed in cur_snapshot select id1 from txid_snapshot_xip(batch.last_snapshot) id1 left join txid_snapshot_xip(batch.cur_snapshot) id2 on (id1 = id2) where id2 is null order by 1 desc loop -- try to avoid big IN expression, so try to include nearby -- tx'es into range if batch.tx_start - 100 <= rec.id1 then batch.tx_start := rec.id1; else if arr = '' then arr := rec.id1; else arr := arr || ',' || rec.id1; end if; end if; end loop; -- must match pgq.event_template select_fields := 'select ev_id, ev_time, ev_txid, ev_retry, ev_type,' || ' ev_data, ev_extra1, ev_extra2, ev_extra3, ev_extra4'; retry_expr := ' and (ev_owner is null or ev_owner = ' || batch.sub_id || ')'; -- now generate query that 
goes over all potential tables sql := ''; for rec in select xtbl from pgq.batch_event_tables(x_batch_id) xtbl loop tbl := rec.xtbl; -- this gets newer queries that definitely are not in prev_snapshot part := select_fields || ' from pgq.tick cur, pgq.tick last, ' || tbl || ' ev ' || ' where cur.tick_id = ' || batch.sub_next_tick || ' and cur.tick_queue = ' || batch.sub_queue || ' and last.tick_id = ' || batch.sub_last_tick || ' and last.tick_queue = ' || batch.sub_queue || ' and ev.ev_txid >= ' || batch.tx_start || ' and ev.ev_txid <= ' || batch.tx_end || ' and txid_visible_in_snapshot(ev.ev_txid, cur.tick_snapshot)' || ' and not txid_visible_in_snapshot(ev.ev_txid, last.tick_snapshot)' || retry_expr; -- now include older tx-es, that were ongoing -- at the time of prev_snapshot if arr <> '' then part := part || ' union all ' || select_fields || ' from ' || tbl || ' ev ' || ' where ev.ev_txid in (' || arr || ')' || retry_expr; end if; if sql = '' then sql := part; else sql := sql || ' union all ' || part; end if; end loop; if sql = '' then raise exception 'could not construct sql for batch %', x_batch_id; end if; return sql || ' order by 1'; end; $$ language plpgsql; -- no perms needed create or replace function pgq.batch_event_tables(x_batch_id bigint) returns setof text as $$ -- ---------------------------------------------------------------------- -- Function: pgq.batch_event_tables(1) -- -- Returns set of table names where this batch events may reside. -- -- Parameters: -- x_batch_id - ID of a active batch. 
-- ---------------------------------------------------------------------- declare nr integer; tbl text; use_prev integer; use_next integer; batch record; begin select txid_snapshot_xmin(last.tick_snapshot) as tx_min, -- absolute minimum txid_snapshot_xmax(cur.tick_snapshot) as tx_max, -- absolute maximum q.queue_data_pfx, q.queue_ntables, q.queue_cur_table, q.queue_switch_step1, q.queue_switch_step2 into batch from pgq.tick last, pgq.tick cur, pgq.subscription s, pgq.queue q where cur.tick_id = s.sub_next_tick and cur.tick_queue = s.sub_queue and last.tick_id = s.sub_last_tick and last.tick_queue = s.sub_queue and s.sub_batch = x_batch_id and q.queue_id = s.sub_queue; if not found then raise exception 'Cannot find data for batch %', x_batch_id; end if; -- if its definitely not in one or other, look into both if batch.tx_max < batch.queue_switch_step1 then use_prev := 1; use_next := 0; elsif batch.queue_switch_step2 is not null and (batch.tx_min > batch.queue_switch_step2) then use_prev := 0; use_next := 1; else use_prev := 1; use_next := 1; end if; if use_prev then nr := batch.queue_cur_table - 1; if nr < 0 then nr := batch.queue_ntables - 1; end if; tbl := batch.queue_data_pfx || '_' || nr; return next tbl; end if; if use_next then tbl := batch.queue_data_pfx || '_' || batch.queue_cur_table; return next tbl; end if; return; end; $$ language plpgsql; -- no perms needed create or replace function pgq.event_retry_raw( x_queue text, x_consumer text, x_retry_after timestamptz, x_ev_id bigint, x_ev_time timestamptz, x_ev_retry integer, x_ev_type text, x_ev_data text, x_ev_extra1 text, x_ev_extra2 text, x_ev_extra3 text, x_ev_extra4 text) returns bigint as $$ -- ---------------------------------------------------------------------- -- Function: pgq.event_retry_raw(12) -- -- Allows full control over what goes to retry queue. 
-- -- Parameters: -- x_queue - name of the queue -- x_consumer - name of the consumer -- x_retry_after - when the event should be processed again -- x_ev_id - event id -- x_ev_time - creation time -- x_ev_retry - retry count -- x_ev_type - user data -- x_ev_data - user data -- x_ev_extra1 - user data -- x_ev_extra2 - user data -- x_ev_extra3 - user data -- x_ev_extra4 - user data -- -- Returns: -- Event ID. -- ---------------------------------------------------------------------- declare q record; id bigint; begin select sub_id, queue_event_seq into q from pgq.consumer, pgq.queue, pgq.subscription where queue_name = x_queue and co_name = x_consumer and sub_consumer = co_id and sub_queue = queue_id; if not found then raise exception 'consumer not registered'; end if; id := x_ev_id; if id is null then id := nextval(q.queue_event_seq); end if; insert into pgq.retry_queue (ev_retry_after, ev_id, ev_time, ev_owner, ev_retry, ev_type, ev_data, ev_extra1, ev_extra2, ev_extra3, ev_extra4) values (x_retry_after, x_ev_id, x_ev_time, q.sub_id, x_ev_retry, x_ev_type, x_ev_data, x_ev_extra1, x_ev_extra2, x_ev_extra3, x_ev_extra4); return id; end; $$ language plpgsql security definer; -- \i functions/pgq.insert_event_raw.sql -- ---------------------------------------------------------------------- -- Function: pgq.insert_event_raw(11) -- -- Actual event insertion. Used also by retry queue maintenance. -- -- Parameters: -- queue_name - Name of the queue -- ev_id - Event ID. If NULL, will be taken from seq. -- ev_time - Event creation time. -- ev_owner - Subscription ID when retry event. If NULL, the event is for everybody. -- ev_retry - Retry count. NULL for first-time events. -- ev_type - user data -- ev_data - user data -- ev_extra1 - user data -- ev_extra2 - user data -- ev_extra3 - user data -- ev_extra4 - user data -- -- Returns: -- Event ID. 
-- ---------------------------------------------------------------------- CREATE OR REPLACE FUNCTION pgq.insert_event_raw( queue_name text, ev_id bigint, ev_time timestamptz, ev_owner integer, ev_retry integer, ev_type text, ev_data text, ev_extra1 text, ev_extra2 text, ev_extra3 text, ev_extra4 text) RETURNS int8 AS '$libdir/pgq_lowlevel', 'pgq_insert_event_raw' LANGUAGE C; -- Group: Ticker create or replace function pgq.ticker(i_queue_name text, i_tick_id bigint) returns bigint as $$ -- ---------------------------------------------------------------------- -- Function: pgq.ticker(2) -- -- Insert a tick with a particular tick_id. -- -- For external tickers. -- -- Parameters: -- i_queue_name - Name of the queue -- i_tick_id - Id of new tick. -- -- Returns: -- Tick id. -- ---------------------------------------------------------------------- begin insert into pgq.tick (tick_queue, tick_id) select queue_id, i_tick_id from pgq.queue where queue_name = i_queue_name and queue_external_ticker; if not found then raise exception 'queue not found'; end if; return i_tick_id; end; $$ language plpgsql security definer; -- unsure about access create or replace function pgq.ticker(i_queue_name text) returns bigint as $$ -- ---------------------------------------------------------------------- -- Function: pgq.ticker(1) -- -- Insert a tick with a tick_id from sequence. -- -- For pgqadm usage. -- -- Parameters: -- i_queue_name - Name of the queue -- -- Returns: -- Tick id. 
-- ---------------------------------------------------------------------- declare res bigint; ext boolean; seq text; q record; begin select queue_id, queue_tick_seq, queue_external_ticker into q from pgq.queue where queue_name = i_queue_name; if not found then raise exception 'no such queue'; end if; if q.queue_external_ticker then raise exception 'This queue has external tick source.'; end if; insert into pgq.tick (tick_queue, tick_id) values (q.queue_id, nextval(q.queue_tick_seq)); res = currval(q.queue_tick_seq); return res; end; $$ language plpgsql security definer; -- unsure about access create or replace function pgq.ticker() returns bigint as $$ -- ---------------------------------------------------------------------- -- Function: pgq.ticker(0) -- -- Creates ticks for all queues which dont have external ticker. -- -- Returns: -- Number of queues that were processed. -- ---------------------------------------------------------------------- declare res bigint; begin select count(pgq.ticker(queue_name)) into res from pgq.queue where not queue_external_ticker; return res; end; $$ language plpgsql security definer; -- Group: Periodic maintenence create or replace function pgq.maint_retry_events() returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.maint_retry_events(0) -- -- Moves retry events back to main queue. -- -- It moves small amount at a time. It should be called -- until it returns 0 -- -- Returns: -- Number of events processed. 
-- ---------------------------------------------------------------------- declare cnt integer; rec record; begin cnt := 0; -- allow only single event mover at a time, without affecting inserts lock table pgq.retry_queue in share update exclusive mode; for rec in select queue_name, ev_id, ev_time, ev_owner, ev_retry, ev_type, ev_data, ev_extra1, ev_extra2, ev_extra3, ev_extra4 from pgq.retry_queue, pgq.queue, pgq.subscription where ev_retry_after <= current_timestamp and sub_id = ev_owner and queue_id = sub_queue order by ev_retry_after limit 10 loop cnt := cnt + 1; perform pgq.insert_event_raw(rec.queue_name, rec.ev_id, rec.ev_time, rec.ev_owner, rec.ev_retry, rec.ev_type, rec.ev_data, rec.ev_extra1, rec.ev_extra2, rec.ev_extra3, rec.ev_extra4); delete from pgq.retry_queue where ev_owner = rec.ev_owner and ev_id = rec.ev_id; end loop; return cnt; end; $$ language plpgsql; -- need admin access create or replace function pgq.maint_rotate_tables_step1(i_queue_name text) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.maint_rotate_tables_step1(1) -- -- Rotate tables for one queue. -- -- Parameters: -- i_queue_name - Name of the queue -- -- Returns: -- 1 if rotation happened, otherwise 0. -- ---------------------------------------------------------------------- declare badcnt integer; cf record; nr integer; tbl text; lowest_tick_id int8; lowest_xmin int8; begin -- check if needed and load record select * from pgq.queue into cf where queue_name = i_queue_name and queue_rotation_period is not null and queue_switch_step2 is not null and queue_switch_time + queue_rotation_period < current_timestamp for update; if not found then return 0; end if; -- find lowest tick for that queue select min(sub_last_tick) into lowest_tick_id from pgq.subscription where sub_queue = cf.queue_id; -- if some consumer exists if lowest_tick_id is not null then -- is the slowest one still on previous table? 
select txid_snapshot_xmin(tick_snapshot) into lowest_xmin from pgq.tick where tick_queue = cf.queue_id and tick_id = lowest_tick_id; if lowest_xmin <= cf.queue_switch_step2 then return 0; -- skip rotation then end if; end if; -- nobody on previous table, we can rotate -- calc next table number and name nr := cf.queue_cur_table + 1; if nr = cf.queue_ntables then nr := 0; end if; tbl := cf.queue_data_pfx || '_' || nr; -- there may be long lock on the table from pg_dump, -- detect it and skip rotate then begin execute 'lock table ' || tbl || ' nowait'; execute 'truncate ' || tbl; exception when lock_not_available then -- cannot truncate, skipping rotate return 0; end; -- remember the moment update pgq.queue set queue_cur_table = nr, queue_switch_time = current_timestamp, queue_switch_step1 = txid_current(), queue_switch_step2 = NULL where queue_id = cf.queue_id; -- Clean ticks by using step2 txid from previous rotation. -- That should keep all ticks for all batches that are completely -- in old table. This keeps them for longer than needed, but: -- 1. we want the pgq.tick table to be big, to avoid Postgres -- accitentally switching to seqscans on that. -- 2. that way we guarantee to consumers that they an be moved -- back on the queue at least for one rotation_period. -- (may help in disaster recovery) delete from pgq.tick where tick_queue = cf.queue_id and txid_snapshot_xmin(tick_snapshot) < cf.queue_switch_step2; return 1; end; $$ language plpgsql; -- need admin access create or replace function pgq.maint_rotate_tables_step2() returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.maint_rotate_tables_step2(0) -- -- Stores the txid when the rotation was visible. 
It should be -- called in separate transaction than pgq.maint_rotate_tables_step1() -- ---------------------------------------------------------------------- begin update pgq.queue set queue_switch_step2 = txid_current() where queue_switch_step2 is null; return 1; end; $$ language plpgsql; -- need admin access create or replace function pgq.maint_tables_to_vacuum() returns setof text as $$ -- ---------------------------------------------------------------------- -- Function: pgq.maint_tables_to_vacuum(0) -- -- Returns list of tablenames that need frequent vacuuming. -- -- The goal is to avoid hardcoding them into maintenance process. -- -- Returns: -- List of table names. -- ---------------------------------------------------------------------- declare row record; begin return next 'pgq.subscription'; return next 'pgq.consumer'; return next 'pgq.queue'; return next 'pgq.tick'; return next 'pgq.retry_queue'; -- include also txid, pgq_ext and londiste tables if they exist for row in select n.nspname as scm, t.relname as tbl from pg_class t, pg_namespace n where n.oid = t.relnamespace and n.nspname = 'txid' and t.relname = 'epoch' union all select n.nspname as scm, t.relname as tbl from pg_class t, pg_namespace n where n.oid = t.relnamespace and n.nspname = 'londiste' and t.relname = 'completed' union all select n.nspname as scm, t.relname as tbl from pg_class t, pg_namespace n where n.oid = t.relnamespace and n.nspname = 'pgq_ext' and t.relname in ('completed_tick', 'completed_batch', 'completed_event', 'partial_batch') loop return next row.scm || '.' || row.tbl; end loop; return; end; $$ language plpgsql; -- Group: Random utility functions create or replace function pgq.grant_perms(x_queue_name text) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.grant_perms(1) -- -- Make event tables readable by public. -- -- Parameters: -- x_queue_name - Name of the queue. 
-- -- Returns: -- nothing -- ---------------------------------------------------------------------- declare q record; i integer; tbl_perms text; seq_perms text; begin select * from pgq.queue into q where queue_name = x_queue_name; if not found then raise exception 'Queue not found'; end if; if true then -- safe, all access must go via functions seq_perms := 'select'; tbl_perms := 'select'; else -- allow ordinery users to directly insert -- to event tables. dangerous. seq_perms := 'select, update'; tbl_perms := 'select, insert'; end if; -- tick seq, normal users don't need to modify it execute 'grant ' || seq_perms || ' on ' || q.queue_tick_seq || ' to public'; -- event seq execute 'grant ' || seq_perms || ' on ' || q.queue_event_seq || ' to public'; -- parent table for events execute 'grant select on ' || q.queue_data_pfx || ' to public'; -- real event tables for i in 0 .. q.queue_ntables - 1 loop execute 'grant ' || tbl_perms || ' on ' || q.queue_data_pfx || '_' || i || ' to public'; end loop; return 1; end; $$ language plpgsql security definer; create or replace function pgq.force_tick(i_queue_name text) returns bigint as $$ -- ---------------------------------------------------------------------- -- Function: pgq.force_tick(2) -- -- Simulate lots of events happening to force ticker to tick. -- -- Should be called in loop, with some delay until last tick -- changes or too much time is passed. -- -- Such function is needed because paraller calls of pgq.ticker() are -- dangerous, and cannot be protected with locks as snapshot -- is taken before locking. -- -- Parameters: -- i_queue_name - Name of the queue -- -- Returns: -- Currently last tick id. 
-- ---------------------------------------------------------------------- declare q record; t record; begin -- bump seq and get queue id select queue_id, setval(queue_event_seq, nextval(queue_event_seq) + queue_ticker_max_count * 2) as tmp into q from pgq.queue where queue_name = i_queue_name and not queue_external_ticker; if not found then raise exception 'queue not found or ticks not allowed'; end if; -- return last tick id select tick_id into t from pgq.tick where tick_queue = q.queue_id order by tick_queue desc, tick_id desc limit 1; return t.tick_id; end; $$ language plpgsql security definer; -- Section: Public Functions -- Group: Queue creation create or replace function pgq.create_queue(i_queue_name text) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.create_queue(1) -- -- Creates new queue with given name. -- -- Returns: -- 0 - queue already exists -- 1 - queue created -- ---------------------------------------------------------------------- declare tblpfx text; tblname text; idxpfx text; idxname text; sql text; id integer; tick_seq text; ev_seq text; n_tables integer; begin if i_queue_name is null then raise exception 'Invalid NULL value'; end if; -- check if exists perform 1 from pgq.queue where queue_name = i_queue_name; if found then return 0; end if; -- insert event id := nextval('pgq.queue_queue_id_seq'); tblpfx := 'pgq.event_' || id; idxpfx := 'event_' || id; tick_seq := 'pgq.event_' || id || '_tick_seq'; ev_seq := 'pgq.event_' || id || '_id_seq'; insert into pgq.queue (queue_id, queue_name, queue_data_pfx, queue_event_seq, queue_tick_seq) values (id, i_queue_name, tblpfx, ev_seq, tick_seq); select queue_ntables into n_tables from pgq.queue where queue_id = id; -- create seqs execute 'CREATE SEQUENCE ' || tick_seq; execute 'CREATE SEQUENCE ' || ev_seq; -- create data tables execute 'CREATE TABLE ' || tblpfx || ' () ' || ' INHERITS (pgq.event_template)'; for i in 0 .. 
(n_tables - 1) loop tblname := tblpfx || '_' || i; idxname := idxpfx || '_' || i; execute 'CREATE TABLE ' || tblname || ' () ' || ' INHERITS (' || tblpfx || ')'; execute 'ALTER TABLE ' || tblname || ' ALTER COLUMN ev_id ' || ' SET DEFAULT nextval(' || quote_literal(ev_seq) || ')'; execute 'create index ' || idxname || '_txid_idx on ' || tblname || ' (ev_txid)'; end loop; perform pgq.grant_perms(i_queue_name); perform pgq.ticker(i_queue_name); return 1; end; $$ language plpgsql security definer; create or replace function pgq.drop_queue(x_queue_name text) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.drop_queue(1) -- -- Drop queue and all associated tables. -- No consumers must be listening on the queue. -- -- ---------------------------------------------------------------------- declare tblname text; q record; num integer; begin -- check ares if x_queue_name is null then raise exception 'Invalid NULL value'; end if; -- check if exists select * into q from pgq.queue where queue_name = x_queue_name; if not found then raise exception 'No such event queue'; end if; -- check if no consumers select count(*) into num from pgq.subscription where sub_queue = q.queue_id; if num > 0 then raise exception 'cannot drop queue, consumers still attached'; end if; -- drop data tables for i in 0 .. (q.queue_ntables - 1) loop tblname := q.queue_data_pfx || '_' || i; execute 'DROP TABLE ' || tblname; end loop; execute 'DROP TABLE ' || q.queue_data_pfx; -- delete ticks delete from pgq.tick where tick_queue = q.queue_id; -- drop seqs -- FIXME: any checks needed here? 
execute 'DROP SEQUENCE ' || q.queue_tick_seq; execute 'DROP SEQUENCE ' || q.queue_event_seq; -- delete event delete from pgq.queue where queue_name = x_queue_name; return 1; end; $$ language plpgsql security definer; -- Group: Event publishing create or replace function pgq.insert_event(queue_name text, ev_type text, ev_data text) returns bigint as $$ -- ---------------------------------------------------------------------- -- Function: pgq.insert_event(3) -- -- Insert a event into queue. -- -- Parameters: -- queue_name - Name of the queue -- ev_type - User-specified type for the event -- ev_data - User data for the event -- -- Returns: -- Event ID -- ---------------------------------------------------------------------- begin return pgq.insert_event(queue_name, ev_type, ev_data, null, null, null, null); end; $$ language plpgsql security definer; create or replace function pgq.insert_event( queue_name text, ev_type text, ev_data text, ev_extra1 text, ev_extra2 text, ev_extra3 text, ev_extra4 text) returns bigint as $$ -- ---------------------------------------------------------------------- -- Function: pgq.insert_event(7) -- -- Insert a event into queue with all the extra fields. 
-- -- Parameters: -- queue_name - Name of the queue -- ev_type - User-specified type for the event -- ev_data - User data for the event -- ev_extra1 - Extra data field for the event -- ev_extra2 - Extra data field for the event -- ev_extra3 - Extra data field for the event -- ev_extra4 - Extra data field for the event -- -- Returns: -- Event ID -- ---------------------------------------------------------------------- begin return pgq.insert_event_raw(queue_name, null, now(), null, null, ev_type, ev_data, ev_extra1, ev_extra2, ev_extra3, ev_extra4); end; $$ language plpgsql security definer; create or replace function pgq.current_event_table(x_queue_name text) returns text as $$ -- ---------------------------------------------------------------------- -- Function: pgq.current_event_table(1) -- -- Return active event table for particular queue. -- Event can be added to it without going via functions, -- e.g. by COPY. -- -- Note: -- The result is valid only during current transaction. -- -- Permissions: -- Actual insertion requires superuser access. -- -- Parameters: -- x_queue_name - Queue name. -- ---------------------------------------------------------------------- declare res text; begin select queue_data_pfx || '_' || queue_cur_table into res from pgq.queue where queue_name = x_queue_name; if not found then raise exception 'Event queue not found'; end if; return res; end; $$ language plpgsql; -- no perms needed -- Group: Subscribing to queue create or replace function pgq.register_consumer( x_queue_name text, x_consumer_id text) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.register_consumer(2) -- -- Subscribe consumer on a queue. -- -- From this moment forward, consumer will see all events in the queue. 
create or replace function pgq.register_consumer(
    x_queue_name text,
    x_consumer_name text,
    x_tick_pos bigint)
returns integer as $$
-- ----------------------------------------------------------------------
-- Function: pgq.register_consumer(3)
--
--      Extended registration, allows to specify tick_id.
--
-- Note:
--      For usage in special situations.
--
-- Parameters:
--      x_queue_name    - Name of a queue
--      x_consumer_name - Name of consumer
--      x_tick_pos      - Tick ID
--
-- Returns:
--      0 if consumer was already registered, 1 if registration is new.
-- ----------------------------------------------------------------------
declare
    -- removed unused declarations (tmp, queue) from the original
    last_tick       bigint;
    x_queue_id      integer;
    x_consumer_id   integer;
    sub             record;
begin
    select queue_id into x_queue_id from pgq.queue
        where queue_name = x_queue_name;
    if not found then
        raise exception 'Event queue not created yet';
    end if;

    -- get consumer and create if new
    select co_id into x_consumer_id from pgq.consumer
        where co_name = x_consumer_name;
    if not found then
        insert into pgq.consumer (co_name) values (x_consumer_name);
        x_consumer_id := currval('pgq.consumer_co_id_seq');
    end if;

    -- if particular tick was requested, check if it exists
    if x_tick_pos is not null then
        perform 1 from pgq.tick
            where tick_queue = x_queue_id
              and tick_id = x_tick_pos;
        if not found then
            raise exception 'cannot reposition, tick not found: %', x_tick_pos;
        end if;
    end if;

    -- check if already registered
    select sub_last_tick, sub_batch into sub
        from pgq.subscription
        where sub_consumer = x_consumer_id
          and sub_queue = x_queue_id;
    if found then
        if x_tick_pos is not null then
            -- repositioning a consumer with an open batch would lose events
            if sub.sub_batch is not null then
                raise exception 'reposition while active not allowed';
            end if;
            -- update tick pos if requested
            update pgq.subscription
                set sub_last_tick = x_tick_pos
                where sub_consumer = x_consumer_id
                  and sub_queue = x_queue_id;
        end if;
        -- already registered
        return 0;
    end if;

    -- new registration
    if x_tick_pos is null then
        -- start from current (latest) tick
        select tick_id into last_tick from pgq.tick
            where tick_queue = x_queue_id
            order by tick_queue desc, tick_id desc
            limit 1;
        if not found then
            raise exception 'No ticks for this queue. Please run ticker on database.';
        end if;
    else
        last_tick := x_tick_pos;
    end if;

    -- register
    insert into pgq.subscription (sub_queue, sub_consumer, sub_last_tick)
        values (x_queue_id, x_consumer_id, last_tick);
    return 1;
end;
$$ language plpgsql security definer;
create or replace function pgq.next_batch(x_queue_name text, x_consumer_name text)
returns bigint as $$
-- ----------------------------------------------------------------------
-- Function: pgq.next_batch(2)
--
--      Makes next block of events active.
--
--      If it returns NULL, there are no events available in queue.
--      Consumer should sleep a bit then.
--
-- Parameters:
--      x_queue_name    - Name of the queue
--      x_consumer_name - Name of the consumer
--
-- Returns:
--      Batch ID or NULL if there are no more events available.
-- ----------------------------------------------------------------------
declare
    next_tick       bigint;
    batch_id        bigint;
    errmsg          text;
    sub             record;
begin
    select sub_queue, sub_consumer, sub_id, sub_last_tick, sub_batch into sub
        from pgq.queue q, pgq.consumer c, pgq.subscription s
        where q.queue_name = x_queue_name
          and c.co_name = x_consumer_name
          and s.sub_queue = q.queue_id
          and s.sub_consumer = c.co_id;
    if not found then
        errmsg := 'Not subscriber to queue: '
            || coalesce(x_queue_name, 'NULL')
            || '/'
            || coalesce(x_consumer_name, 'NULL');
        raise exception '%', errmsg;
    end if;

    -- has already active batch
    if sub.sub_batch is not null then
        return sub.sub_batch;
    end if;

    -- find next tick
    select tick_id into next_tick
        from pgq.tick
        where tick_id > sub.sub_last_tick
          and tick_queue = sub.sub_queue
        order by tick_queue asc, tick_id asc
        limit 1;
    if not found then
        -- nothing to do
        return null;
    end if;

    -- get next batch
    batch_id := nextval('pgq.batch_id_seq');
    update pgq.subscription
        set sub_batch = batch_id,
            sub_next_tick = next_tick,
            sub_active = now()
        where sub_queue = sub.sub_queue
          and sub_consumer = sub.sub_consumer;
    return batch_id;
end;
$$ language plpgsql security definer;
-- ---------------------------------------------------------------------- declare rec pgq.ret_batch_event%rowtype; sql text; begin sql := pgq.batch_event_sql(x_batch_id); for rec in execute sql loop return next rec; end loop; return; end; $$ language plpgsql; -- no perms needed create or replace function pgq.event_failed( x_batch_id bigint, x_event_id bigint, x_reason text) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.event_failed(3) -- -- Copies the event to failed queue so it can be looked at later. -- -- Parameters: -- x_batch_id - ID of active batch. -- x_event_id - Event id -- x_reason - Text to associate with event. -- -- Returns: -- 0 if event was already in queue, 1 otherwise. -- ---------------------------------------------------------------------- begin insert into pgq.failed_queue (ev_failed_reason, ev_failed_time, ev_id, ev_time, ev_txid, ev_owner, ev_retry, ev_type, ev_data, ev_extra1, ev_extra2, ev_extra3, ev_extra4) select x_reason, now(), ev_id, ev_time, NULL, sub_id, coalesce(ev_retry, 0), ev_type, ev_data, ev_extra1, ev_extra2, ev_extra3, ev_extra4 from pgq.get_batch_events(x_batch_id), pgq.subscription where sub_batch = x_batch_id and ev_id = x_event_id; if not found then raise exception 'event not found'; end if; return 1; -- dont worry if the event is already in queue exception when unique_violation then return 0; end; $$ language plpgsql security definer; create or replace function pgq.event_retry( x_batch_id bigint, x_event_id bigint, x_retry_time timestamptz) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.event_retry(3) -- -- Put the event into retry queue, to be processed again later. -- -- Parameters: -- x_batch_id - ID of active batch. 
create or replace function pgq.finish_batch(
    x_batch_id bigint)
returns integer as $$
-- ----------------------------------------------------------------------
-- Function: pgq.finish_batch(1)
--
--      Closes a batch.  No more operations can be done with events
--      of this batch.
--
-- Parameters:
--      x_batch_id      - id of batch.
--
-- Returns:
--      1 if batch was found, 0 otherwise.
-- ----------------------------------------------------------------------
begin
    update pgq.subscription
        set sub_active = now(),
            sub_last_tick = sub_next_tick,
            sub_next_tick = null,
            sub_batch = null
        where sub_batch = x_batch_id;
    if not found then
        raise warning 'finish_batch: batch % not found', x_batch_id;
        return 0;
    end if;

    return 1;
end;
$$ language plpgsql security definer;
-- ---------------------------------------------------------------------- declare ret pgq.ret_queue_info%rowtype; begin select queue_name, queue_ntables, queue_cur_table, queue_rotation_period, queue_switch_time, queue_external_ticker, queue_ticker_max_count, queue_ticker_max_lag, queue_ticker_idle_period, (select current_timestamp - tick_time from pgq.tick where tick_queue = queue_id order by tick_queue desc, tick_id desc limit 1 ) as ticker_lag into ret from pgq.queue where queue_name = qname; return ret; end; $$ language plpgsql security definer; ------------------------------------------------------------------------- create or replace function pgq.get_consumer_info() returns setof pgq.ret_consumer_info as $$ -- ---------------------------------------------------------------------- -- Function: pgq.get_consumer_info(0) -- -- Returns info about all consumers on all queues. -- -- Returns: -- See pgq.get_consumer_info(2) -- ---------------------------------------------------------------------- declare ret pgq.ret_consumer_info%rowtype; i record; begin for i in select queue_name from pgq.queue order by 1 loop for ret in select * from pgq.get_consumer_info(i.queue_name) loop return next ret; end loop; end loop; return; end; $$ language plpgsql security definer; ------------------------------------------------------------------------- create or replace function pgq.get_consumer_info(x_queue_name text) returns setof pgq.ret_consumer_info as $$ -- ---------------------------------------------------------------------- -- Function: pgq.get_consumer_info(1) -- -- Returns info about consumers on one particular queue. 
-- -- Parameters: -- x_queue_name - Queue name -- -- Returns: -- See pgq.get_consumer_info(2) -- ---------------------------------------------------------------------- declare ret pgq.ret_consumer_info%rowtype; tmp record; begin for tmp in select queue_name, co_name from pgq.queue, pgq.consumer, pgq.subscription where queue_id = sub_queue and co_id = sub_consumer and queue_name = x_queue_name order by 1, 2 loop for ret in select * from pgq.get_consumer_info(tmp.queue_name, tmp.co_name) loop return next ret; end loop; end loop; return; end; $$ language plpgsql security definer; ------------------------------------------------------------------------ create or replace function pgq.get_consumer_info( x_queue_name text, x_consumer_name text) returns setof pgq.ret_consumer_info as $$ -- ---------------------------------------------------------------------- -- Function: pgq.get_consumer_info(2) -- -- Get info about particular consumer on particular queue. -- -- Parameters: -- x_queue_name - name of a queue. -- x_consumer_name - name of a consumer -- -- Returns: -- queue_name - Queue name -- consumer_name - Consumer name -- lag - How old are events the consumer is processing -- last_seen - When the consumer seen by pgq -- last_tick - Tick ID of last processed tick -- current_batch - Current batch ID, if one is active or NULL -- next_tick - If batch is active, then its final tick. 
create or replace function pgq.version()
returns text as $$
-- ----------------------------------------------------------------------
-- Function: pgq.version(0)
--
--      Returns version string for pgq.  ATM its SkyTools version
--      that is only bumped when PGQ database code changes.
-- ----------------------------------------------------------------------
begin
    return '2.1.8';
end;
$$ language plpgsql;
-- -- Returns: -- Info -- ---------------------------------------------------------------------- declare ret pgq.ret_batch_info%rowtype; begin select queue_name, co_name, prev.tick_time as batch_start, cur.tick_time as batch_end, sub_last_tick, sub_next_tick, current_timestamp - cur.tick_time as lag into ret from pgq.subscription, pgq.tick cur, pgq.tick prev, pgq.queue, pgq.consumer where sub_batch = x_batch_id and prev.tick_id = sub_last_tick and prev.tick_queue = sub_queue and cur.tick_id = sub_next_tick and cur.tick_queue = sub_queue and queue_id = sub_queue and co_id = sub_consumer; return ret; end; $$ language plpgsql security definer; -- Group: Failed queue browsing create or replace function pgq.failed_event_list( x_queue_name text, x_consumer_name text) returns setof pgq.failed_queue as $$ -- ---------------------------------------------------------------------- -- Function: pgq.failed_event_list(2) -- -- Get list of all failed events for one consumer. -- -- Parameters: -- x_queue_name - Queue name -- x_consumer_name - Consumer name -- -- Returns: -- List of failed events. -- ---------------------------------------------------------------------- declare rec pgq.failed_queue%rowtype; begin for rec in select fq.* from pgq.failed_queue fq, pgq.consumer, pgq.queue, pgq.subscription where queue_name = x_queue_name and co_name = x_consumer_name and sub_consumer = co_id and sub_queue = queue_id and ev_owner = sub_id order by ev_id loop return next rec; end loop; return; end; $$ language plpgsql security definer; create or replace function pgq.failed_event_list( x_queue_name text, x_consumer_name text, x_count integer, x_offset integer) returns setof pgq.failed_queue as $$ -- ---------------------------------------------------------------------- -- Function: pgq.failed_event_list(4) -- -- Get list of failed events, from offset and specific count. 
-- -- Parameters: -- x_queue_name - Queue name -- x_consumer_name - Consumer name -- x_count - Max amount of events to fetch -- x_offset - From this offset -- -- Returns: -- List of failed events. -- ---------------------------------------------------------------------- declare rec pgq.failed_queue%rowtype; begin for rec in select fq.* from pgq.failed_queue fq, pgq.consumer, pgq.queue, pgq.subscription where queue_name = x_queue_name and co_name = x_consumer_name and sub_consumer = co_id and sub_queue = queue_id and ev_owner = sub_id order by ev_id limit x_count offset x_offset loop return next rec; end loop; return; end; $$ language plpgsql security definer; create or replace function pgq.failed_event_count( x_queue_name text, x_consumer_name text) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.failed_event_count(2) -- -- Get size of failed event queue. -- -- Parameters: -- x_queue_name - Queue name -- x_consumer_name - Consumer name -- -- Returns: -- Number of failed events in failed event queue. -- ---------------------------------------------------------------------- declare ret integer; begin select count(1) into ret from pgq.failed_queue, pgq.consumer, pgq.queue, pgq.subscription where queue_name = x_queue_name and co_name = x_consumer_name and sub_queue = queue_id and sub_consumer = co_id and ev_owner = sub_id; return ret; end; $$ language plpgsql security definer; create or replace function pgq.failed_event_delete( x_queue_name text, x_consumer_name text, x_event_id bigint) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.failed_event_delete(3) -- -- Delete specific event from failed event queue. 
-- -- Parameters: -- x_queue_name - Queue name -- x_consumer_name - Consumer name -- x_event_id - Event ID -- -- Returns: -- nothing -- ---------------------------------------------------------------------- declare x_sub_id integer; begin select sub_id into x_sub_id from pgq.subscription, pgq.consumer, pgq.queue where queue_name = x_queue_name and co_name = x_consumer_name and sub_consumer = co_id and sub_queue = queue_id; if not found then raise exception 'no such queue/consumer'; end if; delete from pgq.failed_queue where ev_owner = x_sub_id and ev_id = x_event_id; if not found then raise exception 'event not found'; end if; return 1; end; $$ language plpgsql security definer; create or replace function pgq.failed_event_retry( x_queue_name text, x_consumer_name text, x_event_id bigint) returns bigint as $$ -- ---------------------------------------------------------------------- -- Function: pgq.failed_event_retry(3) -- -- Insert specific event from failed queue to main queue. -- -- Parameters: -- x_queue_name - Queue name -- x_consumer_name - Consumer name -- x_event_id - Event ID -- -- Returns: -- nothing -- ---------------------------------------------------------------------- declare ret bigint; x_sub_id integer; begin select sub_id into x_sub_id from pgq.subscription, pgq.consumer, pgq.queue where queue_name = x_queue_name and co_name = x_consumer_name and sub_consumer = co_id and sub_queue = queue_id; if not found then raise exception 'no such queue/consumer'; end if; select pgq.insert_event_raw(x_queue_name, ev_id, ev_time, ev_owner, ev_retry, ev_type, ev_data, ev_extra1, ev_extra2, ev_extra3, ev_extra4) into ret from pgq.failed_queue, pgq.consumer, pgq.queue where ev_owner = x_sub_id and ev_id = x_event_id; if not found then raise exception 'event not found'; end if; perform pgq.failed_event_delete(x_queue_name, x_consumer_name, x_event_id); return ret; end; $$ language plpgsql security definer; -- Section: Public Triggers -- Group: Trigger Functions -- 
\i triggers/pgq.logutriga.sql -- ---------------------------------------------------------------------- -- Function: pgq.logtriga() -- -- Deprecated - non-automatic SQL trigger. It puts row data in partial -- SQL form into queue. It does not auto-detect table structure, -- it needs to be passed as trigger arg. -- -- Purpose: -- Used by Londiste to generate replication events. The "partial SQL" -- format is more compact than the urlencoded format but cannot be -- parsed, only applied. Which is fine for Londiste. -- -- Parameters: -- arg1 - queue name -- arg2 - column type spec string where each column corresponds to one char (k/v/i). -- if spec string is shorter than column list, rest of columns default to 'i'. -- -- Column types: -- k - pkey column -- v - normal data column -- i - ignore column -- -- Queue event fields: -- ev_type - I/U/D -- ev_data - partial SQL statement -- ev_extra1 - table name -- -- ---------------------------------------------------------------------- CREATE OR REPLACE FUNCTION pgq.logtriga() RETURNS trigger AS '$libdir/pgq_triggers', 'pgq_logtriga' LANGUAGE C; -- ---------------------------------------------------------------------- -- Function: pgq.sqltriga() -- -- Automatic SQL trigger. It puts row data in partial SQL form into -- queue. It autodetects table structure. -- -- Purpose: -- Written as more flexible version of logtriga to handle exceptional cases -- where there is no primary key index on table etc. 
-- -- Parameters: -- arg1 - queue name -- argX - any number of optional arg, in any order -- -- Optinal arguments: -- SKIP - The actual operation should be skipped -- ignore=col1[,col2] - don't look at the specified arguments -- pkey=col1[,col2] - Set pkey fields for the table, autodetection will be skipped -- backup - Put urlencoded contents of old row to ev_extra2 -- -- Queue event fields: -- ev_type - I/U/D -- ev_data - partial SQL statement -- ev_extra1 - table name -- ev_extra2 - optional urlencoded backup -- -- ---------------------------------------------------------------------- CREATE OR REPLACE FUNCTION pgq.sqltriga() RETURNS trigger AS '$libdir/pgq_triggers', 'pgq_sqltriga' LANGUAGE C; -- ---------------------------------------------------------------------- -- Function: pgq.logutriga() -- -- Trigger function that puts row data in urlencoded form into queue. -- -- Purpose: -- Used as producer for several PgQ standard consumers (cube_dispatcher, -- queue_mover, table_dispatcher). Basically for cases where the -- consumer wants to parse the event and look at the actual column values. 
-- -- Trigger parameters: -- arg1 - queue name -- argX - any number of optional arg, in any order -- -- Optinal arguments: -- SKIP - The actual operation should be skipped -- ignore=col1[,col2] - don't look at the specified arguments -- pkey=col1[,col2] - Set pkey fields for the table, autodetection will be skipped -- backup - Put urlencoded contents of old row to ev_extra2 -- -- Queue event fields: -- ev_type - I/U/D ':' pkey_column_list -- ev_data - column values urlencoded -- ev_extra1 - table name -- ev_extra2 - optional urlencoded backup -- -- Regular listen trigger example: -- > CREATE TRIGGER triga_nimi AFTER INSERT OR UPDATE ON customer -- > FOR EACH ROW EXECUTE PROCEDURE pgq.logutriga('qname'); -- -- Redirect trigger example: -- > CREATE TRIGGER triga_nimi BEFORE INSERT OR UPDATE ON customer -- > FOR EACH ROW EXECUTE PROCEDURE pgq.logutriga('qname', 'SKIP'); -- ---------------------------------------------------------------------- CREATE OR REPLACE FUNCTION pgq.logutriga() RETURNS TRIGGER AS '$libdir/pgq_triggers', 'pgq_logutriga' LANGUAGE C; skytools-3.2.6/upgrade/final/londiste.2.1.12.sql0000644000000000000000000007211312426435645016120 0ustar set default_with_oids = 'off'; create schema londiste; create table londiste.provider_table ( nr serial not null, queue_name text not null, table_name text not null, trigger_name text, primary key (queue_name, table_name) ); create table londiste.provider_seq ( nr serial not null, queue_name text not null, seq_name text not null, primary key (queue_name, seq_name) ); create table londiste.completed ( consumer_id text not null, last_tick_id bigint not null, primary key (consumer_id) ); create table londiste.link ( source text not null, dest text not null, primary key (source), unique (dest) ); create table londiste.subscriber_table ( nr serial not null, queue_name text not null, table_name text not null, snapshot text, merge_state text, trigger_name text, skip_truncate bool, primary key (queue_name, table_name) ); 
create table londiste.subscriber_seq ( nr serial not null, queue_name text not null, seq_name text not null, primary key (queue_name, seq_name) ); create table londiste.subscriber_pending_fkeys ( from_table text not null, to_table text not null, fkey_name text not null, fkey_def text not null, primary key (from_table, fkey_name) ); create table londiste.subscriber_pending_triggers ( table_name text not null, trigger_name text not null, trigger_def text not null, primary key (table_name, trigger_name) ); grant usage on schema londiste to public; grant select on londiste.provider_table to public; grant select on londiste.completed to public; grant select on londiste.link to public; grant select on londiste.subscriber_table to public; create type londiste.ret_provider_table_list as ( table_name text, trigger_name text ); create type londiste.ret_subscriber_table as ( table_name text, merge_state text, snapshot text, trigger_name text, skip_truncate bool ); create or replace function londiste.find_column_types(tbl text) returns text as $$ declare res text; col record; tbl_oid oid; begin tbl_oid := londiste.find_table_oid(tbl); res := ''; for col in SELECT CASE WHEN k.attname IS NOT NULL THEN 'k' ELSE 'v' END AS type FROM pg_attribute a LEFT JOIN ( SELECT k.attname FROM pg_index i, pg_attribute k WHERE i.indrelid = tbl_oid AND k.attrelid = i.indexrelid AND i.indisprimary AND k.attnum > 0 AND NOT k.attisdropped ) k ON (k.attname = a.attname) WHERE a.attrelid = tbl_oid AND a.attnum > 0 AND NOT a.attisdropped ORDER BY a.attnum loop res := res || col.type; end loop; return res; end; $$ language plpgsql strict stable; create or replace function londiste.find_table_fkeys(i_table_name text) returns setof londiste.subscriber_pending_fkeys as $$ declare fkey record; tbl_oid oid; begin select londiste.find_table_oid(i_table_name) into tbl_oid; for fkey in select n1.nspname || '.' || t1.relname as from_table, n2.nspname || '.' 
|| t2.relname as to_table, conname::text as fkey_name, 'alter table only ' || quote_ident(n1.nspname) || '.' || quote_ident(t1.relname) || ' add constraint ' || quote_ident(conname::text) || ' ' || pg_get_constraintdef(c.oid) as fkey_def from pg_constraint c, pg_namespace n1, pg_class t1, pg_namespace n2, pg_class t2 where c.contype = 'f' and (c.conrelid = tbl_oid or c.confrelid = tbl_oid) and t1.oid = c.conrelid and n1.oid = t1.relnamespace and t2.oid = c.confrelid and n2.oid = t2.relnamespace order by 1,2,3 loop return next fkey; end loop; return; end; $$ language plpgsql strict stable; create or replace function londiste.find_rel_oid(tbl text, kind text) returns oid as $$ declare res oid; pos integer; schema text; name text; begin pos := position('.' in tbl); if pos > 0 then schema := substring(tbl for pos - 1); name := substring(tbl from pos + 1); else schema := 'public'; name := tbl; end if; select c.oid into res from pg_namespace n, pg_class c where c.relnamespace = n.oid and c.relkind = kind and n.nspname = schema and c.relname = name; if not found then if kind = 'r' then raise exception 'table not found'; elsif kind = 'S' then raise exception 'seq not found'; else raise exception 'weird relkind'; end if; end if; return res; end; $$ language plpgsql strict stable; create or replace function londiste.find_table_oid(tbl text) returns oid as $$ begin return londiste.find_rel_oid(tbl, 'r'); end; $$ language plpgsql strict stable; create or replace function londiste.find_seq_oid(tbl text) returns oid as $$ begin return londiste.find_rel_oid(tbl, 'S'); end; $$ language plpgsql strict stable; create or replace function londiste.find_table_triggers(i_table_name text) returns setof londiste.subscriber_pending_triggers as $$ declare tg record; ver int4; begin select setting::int4 into ver from pg_settings where name = 'server_version_num'; if ver >= 90000 then for tg in select n.nspname || '.' 
create or replace function londiste.link(i_source_name text, i_dest_name text, prov_tick_id bigint, prov_tbl_list text, prov_seq_list text)
returns text as $$
-- ----------------------------------------------------------------------
-- Function: londiste.link(5)
--
--      Link a subscriber queue back to its provider so the subscriber
--      can serve as a cascaded provider.
--
-- Parameters:
--      i_source_name  - provider queue name
--      i_dest_name    - destination (linked provider) queue name
--      prov_tick_id   - provider's current tick id
--      prov_tbl_list  - comma-separated table list on provider
--      prov_seq_list  - comma-separated sequence list on provider
--
-- Returns:
--      NULL on success, raises exception on any mismatch.
-- ----------------------------------------------------------------------
declare
    -- removed unused declarations (tmp, list, last_tick) from the original
    tick_seq    text;
    external    boolean;
begin
    -- check if all matches
    if not londiste.cmp_list(prov_tbl_list, i_source_name,
                             'londiste.subscriber_table', 'table_name')
    then
        raise exception 'not all tables copied into subscriber';
    end if;
    if not londiste.cmp_list(prov_seq_list, i_source_name,
                             'londiste.subscriber_seq', 'seq_name')
    then
        raise exception 'not all seqs copied into subscriber';
    end if;
    -- bugfix: original passed prov_seq_list here; the provider *table*
    -- list must be compared against londiste.provider_table
    if not londiste.cmp_list(prov_tbl_list, i_dest_name,
                             'londiste.provider_table', 'table_name')
    then
        raise exception 'linked provider queue does not have all tables';
    end if;
    if not londiste.cmp_list(prov_seq_list, i_dest_name,
                             'londiste.provider_seq', 'seq_name')
    then
        raise exception 'linked provider queue does not have all seqs';
    end if;

    -- check pgq
    select queue_external_ticker, queue_tick_seq into external, tick_seq
        from pgq.queue
        where queue_name = i_dest_name;
    if not found then
        raise exception 'dest queue does not exist';
    end if;
    if external then
        raise exception 'dest queue has already external_ticker turned on?';
    end if;

    if nextval(tick_seq) >= prov_tick_id then
        raise exception 'dest queue ticks larger';
    end if;

    update pgq.queue
        set queue_external_ticker = true
        where queue_name = i_dest_name;

    insert into londiste.link (source, dest)
        values (i_source_name, i_dest_name);

    return null;
end;
$$ language plpgsql security definer;
link is not null then raise exception 'Linked queue, cannot modify'; end if; perform 1 from pg_class where oid = londiste.find_seq_oid(i_seq_name); if not found then raise exception 'seq not found'; end if; insert into londiste.provider_seq (queue_name, seq_name) values (i_queue_name, i_seq_name); return 0; end; $$ language plpgsql security definer; create or replace function londiste.provider_add_table( i_queue_name text, i_table_name text, i_col_types text ) returns integer strict as $$ declare tgname text; sql text; begin if londiste.link_source(i_queue_name) is not null then raise exception 'Linked queue, manipulation not allowed'; end if; if position('k' in i_col_types) < 1 then raise exception 'need key column'; end if; if position('.' in i_table_name) < 1 then raise exception 'need fully-qualified table name'; end if; select queue_name into tgname from pgq.queue where queue_name = i_queue_name; if not found then raise exception 'no such event queue'; end if; tgname := i_queue_name || '_logger'; tgname := replace(lower(tgname), '.', '_'); insert into londiste.provider_table (queue_name, table_name, trigger_name) values (i_queue_name, i_table_name, tgname); perform londiste.provider_create_trigger( i_queue_name, i_table_name, i_col_types); return 1; end; $$ language plpgsql security definer; create or replace function londiste.provider_add_table( i_queue_name text, i_table_name text ) returns integer as $$ begin return londiste.provider_add_table(i_queue_name, i_table_name, londiste.find_column_types(i_table_name)); end; $$ language plpgsql security definer; create or replace function londiste.provider_create_trigger( i_queue_name text, i_table_name text, i_col_types text ) returns integer strict as $$ declare tgname text; begin select trigger_name into tgname from londiste.provider_table where queue_name = i_queue_name and table_name = i_table_name; if not found then raise exception 'table not found'; end if; execute 'create trigger ' || quote_ident(tgname) 
|| ' after insert or update or delete on ' || londiste.quote_fqname(i_table_name) || ' for each row execute procedure pgq.logtriga(' || quote_literal(i_queue_name) || ', ' || quote_literal(i_col_types) || ', ' || quote_literal(i_table_name) || ')'; return 1; end; $$ language plpgsql security definer; create or replace function londiste.provider_get_seq_list(i_queue_name text) returns setof text as $$ declare rec record; begin for rec in select seq_name from londiste.provider_seq where queue_name = i_queue_name order by nr loop return next rec.seq_name; end loop; return; end; $$ language plpgsql security definer; create or replace function londiste.provider_get_table_list(i_queue text) returns setof londiste.ret_provider_table_list as $$ declare rec londiste.ret_provider_table_list%rowtype; begin for rec in select table_name, trigger_name from londiste.provider_table where queue_name = i_queue order by nr loop return next rec; end loop; return; end; $$ language plpgsql security definer; create or replace function londiste.provider_notify_change(i_queue_name text) returns integer as $$ declare res text; tbl record; begin res := ''; for tbl in select table_name from londiste.provider_table where queue_name = i_queue_name order by nr loop if res = '' then res := tbl.table_name; else res := res || ',' || tbl.table_name; end if; end loop; perform pgq.insert_event(i_queue_name, 'T', res); return 1; end; $$ language plpgsql security definer; create or replace function londiste.provider_refresh_trigger( i_queue_name text, i_table_name text, i_col_types text ) returns integer strict as $$ declare t_name text; tbl_oid oid; begin select trigger_name into t_name from londiste.provider_table where queue_name = i_queue_name and table_name = i_table_name; if not found then raise exception 'table not found'; end if; tbl_oid := londiste.find_table_oid(i_table_name); perform 1 from pg_trigger where tgrelid = tbl_oid and tgname = t_name; if found then execute 'drop trigger ' || 
quote_ident(t_name) || ' on ' || londiste.quote_fqname(i_table_name); end if; perform londiste.provider_create_trigger(i_queue_name, i_table_name, i_col_types); return 1; end; $$ language plpgsql security definer; create or replace function londiste.provider_refresh_trigger( i_queue_name text, i_table_name text ) returns integer strict as $$ begin return londiste.provider_refresh_trigger(i_queue_name, i_table_name, londiste.find_column_types(i_table_name)); end; $$ language plpgsql security definer; create or replace function londiste.provider_remove_seq( i_queue_name text, i_seq_name text) returns integer as $$ declare link text; begin -- check if linked queue link := londiste.link_source(i_queue_name); if link is not null then raise exception 'Linked queue, cannot modify'; end if; delete from londiste.provider_seq where queue_name = i_queue_name and seq_name = i_seq_name; if not found then raise exception 'seq not attached'; end if; perform londiste.provider_notify_change(i_queue_name); return 0; end; $$ language plpgsql security definer; create or replace function londiste.provider_remove_table( i_queue_name text, i_table_name text ) returns integer as $$ declare tgname text; begin if londiste.link_source(i_queue_name) is not null then raise exception 'Linked queue, manipulation not allowed'; end if; select trigger_name into tgname from londiste.provider_table where queue_name = i_queue_name and table_name = i_table_name; if not found then raise exception 'no such table registered'; end if; begin execute 'drop trigger ' || quote_ident(tgname) || ' on ' || londiste.quote_fqname(i_table_name); exception when undefined_table then raise notice 'table % does not exist', i_table_name; when undefined_object then raise notice 'trigger % does not exist on table %', tgname, i_table_name; end; delete from londiste.provider_table where queue_name = i_queue_name and table_name = i_table_name; return 1; end; $$ language plpgsql security definer; create or replace function 
londiste.quote_fqname(i_name text)
returns text as $$
-- Split a possibly schema-qualified name on the first dot and return it
-- with both halves quoted; unqualified names default to schema "public".
declare
    dot_pos     integer;
    schema_part text;
    obj_part    text;
begin
    dot_pos := position('.' in i_name);
    if dot_pos > 0 then
        schema_part := substring(i_name for dot_pos - 1);
        obj_part := substring(i_name from dot_pos + 1);
    else
        schema_part := 'public';
        obj_part := i_name;
    end if;
    return quote_ident(schema_part) || '.' || quote_ident(obj_part);
end;
$$ language plpgsql strict immutable;

create or replace function londiste.set_last_tick(
    i_consumer text, i_tick_id bigint)
returns integer as $$
-- Remember the last applied tick for a consumer; NULL removes the record.
begin
    if i_tick_id is not null then
        update londiste.completed
            set last_tick_id = i_tick_id
            where consumer_id = i_consumer;
        if not found then
            insert into londiste.completed (consumer_id, last_tick_id)
                values (i_consumer, i_tick_id);
        end if;
    else
        delete from londiste.completed
            where consumer_id = i_consumer;
    end if;
    return 1;
end;
$$ language plpgsql security definer;

create or replace function londiste.subscriber_add_seq(
    i_queue_name text, i_seq_name text)
returns integer as $$
-- Register a sequence on the subscriber side; if this queue feeds a
-- linked provider queue, propagate the registration there as well.
declare
    link text;
begin
    insert into londiste.subscriber_seq (queue_name, seq_name)
        values (i_queue_name, i_seq_name);

    -- update linked queue if needed
    link := londiste.link_dest(i_queue_name);
    if link is not null then
        insert into londiste.provider_seq (queue_name, seq_name)
            values (link, i_seq_name);
        perform londiste.provider_notify_change(link);
    end if;

    return 0;
end;
$$ language plpgsql security definer;

create or replace function londiste.subscriber_add_table(
    i_queue_name text, i_table text)
returns integer as $$
-- Register a table on the subscriber side of the queue.
begin
    insert into londiste.subscriber_table (queue_name, table_name)
        values (i_queue_name, i_table);
    -- linked queue is updated, when the table is copied
    return 0;
end;
$$ language plpgsql security definer;

create or replace function londiste.subscriber_get_table_pending_fkeys(i_table_name text)
returns setof londiste.subscriber_pending_fkeys as $$
-- List dropped-but-not-yet-restored fkeys touching the given table.
declare
    fkeys record;
begin
    for fkeys in
        select * from londiste.subscriber_pending_fkeys
            where from_table = i_table_name
               or to_table = i_table_name
            order by 1, 2, 3
    loop
        return next
fkeys; end loop; return; end; $$ language plpgsql; create or replace function londiste.subscriber_get_queue_valid_pending_fkeys(i_queue_name text) returns setof londiste.subscriber_pending_fkeys as $$ declare fkeys record; begin for fkeys in select pf.* from londiste.subscriber_pending_fkeys pf join londiste.subscriber_table st_from on (st_from.table_name = pf.from_table and st_from.merge_state = 'ok' and st_from.snapshot is null) join londiste.subscriber_table st_to on (st_to.table_name = pf.to_table and st_to.merge_state = 'ok' and st_to.snapshot is null) -- change the AND to OR to allow fkeys between tables coming from different queues where (st_from.queue_name = i_queue_name and st_to.queue_name = i_queue_name) order by 1, 2, 3 loop return next fkeys; end loop; return; end; $$ language plpgsql; create or replace function londiste.subscriber_drop_table_fkey(i_from_table text, i_fkey_name text) returns integer as $$ declare fkey record; begin select * into fkey from londiste.find_table_fkeys(i_from_table) where fkey_name = i_fkey_name and from_table = i_from_table; if not found then return 0; end if; insert into londiste.subscriber_pending_fkeys values (fkey.from_table, fkey.to_table, i_fkey_name, fkey.fkey_def); execute 'alter table only ' || londiste.quote_fqname(fkey.from_table) || ' drop constraint ' || quote_ident(i_fkey_name); return 1; end; $$ language plpgsql; create or replace function londiste.subscriber_restore_table_fkey(i_from_table text, i_fkey_name text) returns integer as $$ declare fkey record; begin select * into fkey from londiste.subscriber_pending_fkeys where fkey_name = i_fkey_name and from_table = i_from_table; if not found then return 0; end if; delete from londiste.subscriber_pending_fkeys where fkey_name = fkey.fkey_name; execute fkey.fkey_def; return 1; end; $$ language plpgsql; create or replace function londiste.subscriber_get_seq_list(i_queue_name text) returns setof text as $$ declare rec record; begin for rec in select seq_name 
from londiste.subscriber_seq where queue_name = i_queue_name order by nr loop return next rec.seq_name; end loop; return; end; $$ language plpgsql security definer; create or replace function londiste.subscriber_get_table_list(i_queue_name text) returns setof londiste.ret_subscriber_table as $$ declare rec londiste.ret_subscriber_table%rowtype; begin for rec in select table_name, merge_state, snapshot, trigger_name, skip_truncate from londiste.subscriber_table where queue_name = i_queue_name order by nr loop return next rec; end loop; return; end; $$ language plpgsql security definer; -- compat create or replace function londiste.get_table_state(i_queue text) returns setof londiste.subscriber_table as $$ declare rec londiste.subscriber_table%rowtype; begin for rec in select * from londiste.subscriber_table where queue_name = i_queue order by nr loop return next rec; end loop; return; end; $$ language plpgsql security definer; create or replace function londiste.subscriber_remove_seq( i_queue_name text, i_seq_name text) returns integer as $$ declare link text; begin delete from londiste.subscriber_seq where queue_name = i_queue_name and seq_name = i_seq_name; if not found then raise exception 'no such seq?'; end if; -- update linked queue if needed link := londiste.link_dest(i_queue_name); if link is not null then delete from londiste.provider_seq where queue_name = link and seq_name = i_seq_name; perform londiste.provider_notify_change(link); end if; return 0; end; $$ language plpgsql security definer; create or replace function londiste.subscriber_remove_table( i_queue_name text, i_table text) returns integer as $$ declare link text; begin delete from londiste.subscriber_table where queue_name = i_queue_name and table_name = i_table; if not found then raise exception 'no such table'; end if; -- sync link link := londiste.link_dest(i_queue_name); if link is not null then delete from londiste.provider_table where queue_name = link and table_name = i_table; perform 
londiste.provider_notify_change(link);
    end if;
    return 0;
end;
$$ language plpgsql security definer;

create or replace function londiste.subscriber_set_skip_truncate(
    i_queue text, i_table text, i_value bool)
returns integer as $$
-- Set the skip_truncate flag for a registered subscriber table.
begin
    update londiste.subscriber_table
        set skip_truncate = i_value
        where queue_name = i_queue
          and table_name = i_table;
    if not found then
        raise exception 'table not found';
    end if;
    return 1;
end;
$$ language plpgsql security definer;

create or replace function londiste.subscriber_set_table_state(
    i_queue_name text, i_table_name text,
    i_snapshot text, i_merge_state text)
returns integer as $$
-- Update the copy/sync state of a subscriber table and keep a linked
-- provider queue's table registration in sync with that state.
declare
    link text;
begin
    update londiste.subscriber_table
        set snapshot = i_snapshot,
            merge_state = i_merge_state,
            -- reset skip_truncate when table is copied over
            skip_truncate = case when i_merge_state = 'ok' then null else skip_truncate end
        where queue_name = i_queue_name
          and table_name = i_table_name;
    if not found then
        raise exception 'no such table';
    end if;

    -- sync link state also
    link := londiste.link_dest(i_queue_name);
    -- bugfix: "if link then" tried to cast text to boolean and blew up
    -- at runtime whenever a link existed
    if link is not null then
        -- bugfix: referenced undefined variable "linkdst" and used a bare
        -- SELECT (illegal in plpgsql); use PERFORM against "link"
        perform 1 from londiste.provider_table
            where queue_name = link
              and table_name = i_table_name;
        if found then
            if i_merge_state is null or i_merge_state <> 'ok' then
                delete from londiste.provider_table
                    where queue_name = link
                      and table_name = i_table_name;
                -- bugfix: londiste.notify_change() does not exist; the
                -- helper used everywhere else is provider_notify_change()
                perform londiste.provider_notify_change(link);
            end if;
        else
            if i_merge_state = 'ok' then
                insert into londiste.provider_table (queue_name, table_name)
                    values (link, i_table_name);
                perform londiste.provider_notify_change(link);
            end if;
        end if;
    end if;
    return 1;
end;
$$ language plpgsql security definer;

-- compat wrapper, kept for old callers
create or replace function londiste.set_table_state(
    i_queue_name text, i_table_name text,
    i_snapshot text, i_merge_state text)
returns integer as $$
begin
    return londiste.subscriber_set_table_state(i_queue_name, i_table_name,
                                               i_snapshot, i_merge_state);
end;
$$ language plpgsql security definer;

create or replace function
londiste.subscriber_get_table_pending_triggers(i_table_name text) returns setof londiste.subscriber_pending_triggers as $$ declare trigger record; begin for trigger in select * from londiste.subscriber_pending_triggers where table_name = i_table_name loop return next trigger; end loop; return; end; $$ language plpgsql strict stable; create or replace function londiste.subscriber_drop_table_trigger(i_table_name text, i_trigger_name text) returns integer as $$ declare trig_def record; begin select * into trig_def from londiste.find_table_triggers(i_table_name) where trigger_name = i_trigger_name; if FOUND is not true then return 0; end if; insert into londiste.subscriber_pending_triggers(table_name, trigger_name, trigger_def) values (i_table_name, i_trigger_name, trig_def.trigger_def); execute 'drop trigger ' || quote_ident(i_trigger_name) || ' on ' || londiste.quote_fqname(i_table_name); return 1; end; $$ language plpgsql; create or replace function londiste.subscriber_drop_all_table_triggers(i_table_name text) returns integer as $$ declare trigger record; begin for trigger in select trigger_name as name from londiste.find_table_triggers(i_table_name) loop perform londiste.subscriber_drop_table_trigger(i_table_name, trigger.name); end loop; return 1; end; $$ language plpgsql; create or replace function londiste.subscriber_restore_table_trigger(i_table_name text, i_trigger_name text) returns integer as $$ declare trig_def text; begin select trigger_def into trig_def from londiste.subscriber_pending_triggers where (table_name, trigger_name) = (i_table_name, i_trigger_name); if not found then return 0; end if; delete from londiste.subscriber_pending_triggers where table_name = i_table_name and trigger_name = i_trigger_name; execute trig_def; return 1; end; $$ language plpgsql; create or replace function londiste.subscriber_restore_all_table_triggers(i_table_name text) returns integer as $$ declare trigger record; begin for trigger in select trigger_name as name from 
londiste.subscriber_get_table_pending_triggers(i_table_name) loop perform londiste.subscriber_restore_table_trigger(i_table_name, trigger.name); end loop; return 1; end; $$ language plpgsql; create or replace function londiste.version() returns text as $$ begin return '2.1.12'; end; $$ language plpgsql; skytools-3.2.6/upgrade/final/londiste.upgrade_2.1_to_3.1.sql0000644000000000000000000025040112426435645020467 0ustar drop function if exists londiste.find_table_fkeys(text); -- ---------------------------------------------------------------------- -- Section: Londiste internals -- -- Londiste storage: tables/seqs/fkeys/triggers/events. -- -- Londiste event types: -- I/U/D - partial SQL event from pgq.sqltriga() -- I:/U:/D: - urlencoded event from pgq.logutriga() -- EXECUTE - SQL script execution -- TRUNCATE - table truncation -- londiste.add-table - global table addition -- londiste.remove-table - global table removal -- londiste.update-seq - sequence update -- londiste.remove-seq - global sequence removal -- -- pgq.sqltriga() event: -- ev_type - I/U/D which means insert/update/delete -- ev_data - partial SQL -- ev_extra1 - table name -- -- Insert: ev_type = "I", ev_data = "(col1, col2) values (2, 'foo')", ev_extra1 = "public.tblname" -- -- Update: ev_type = "U", ev_data = "col2 = null where col1 = 2", ev_extra1 = "public.tblname" -- -- Delete: ev_type = "D", ev_data = "col1 = 2", ev_extra1 = "public.tblname" -- -- pgq.logutriga() event: -- ev_type - I:/U:/D: plus comma separated list of pkey columns -- ev_data - urlencoded row columns -- ev_extra1 - table name -- -- Insert: ev_type = "I:col1", ev_data = "" -- -- Truncate trigger event: -- ev_type - TRUNCATE -- ev_extra1 - table name -- -- Execute SQL event: -- ev_type - EXECUTE -- ev_data - SQL script -- ev_extra1 - Script ID -- -- Global table addition: -- ev_type - londiste.add-table -- ev_data - table name -- -- Global table removal: -- ev_type - londiste.remove-table -- ev_data - table name -- -- Global 
sequence update: -- ev_type - londiste.update-seq -- ev_data - seq value -- ev_extra1 - seq name --5) -- Global sequence removal: -- ev_type - londiste.remove-seq -- ev_data - seq name -- ---------------------------------------------------------------------- set default_with_oids = 'off'; -- ---------------------------------------------------------------------- -- Table: londiste.table_info -- -- Info about registered tables. -- -- Columns: -- nr - number for visual ordering -- queue_name - Cascaded queue name -- table_name - fully-qualified table name -- local - Is used locally -- merge_state - State for tables -- custom_snapshot - remote snapshot for COPY command -- dropped_ddl - temp place to store ddl -- table_attrs - urlencoded dict of extra attributes -- -- Tables merge states: -- NULL - copy has not yet happened -- in-copy - ongoing bulk copy -- catching-up - copy process applies events that happened during copy -- wanna-sync:% - copy process caught up, wants to hand table over to replay -- do-sync:% - replay process is ready to accept the table -- ok - in sync, replay applies events -- ---------------------------------------------------------------------- create table londiste.table_info ( nr serial not null, queue_name text not null, table_name text not null, local boolean not null default false, merge_state text, custom_snapshot text, dropped_ddl text, table_attrs text, dest_table text, primary key (queue_name, table_name), foreign key (queue_name) references pgq_node.node_info (queue_name) on delete cascade, check (dropped_ddl is null or merge_state in ('in-copy', 'catching-up')) ); -- ---------------------------------------------------------------------- -- Table: londiste.seq_info -- -- Sequences available on this queue. 
-- -- Columns: -- nr - number for visual ordering -- queue_name - cascaded queue name -- seq_name - fully-qualified seq name -- local - there is actual seq on local node -- last_value - last published value from root -- ---------------------------------------------------------------------- create table londiste.seq_info ( nr serial not null, queue_name text not null, seq_name text not null, local boolean not null default false, last_value int8 not null, primary key (queue_name, seq_name), foreign key (queue_name) references pgq_node.node_info (queue_name) on delete cascade ); -- ---------------------------------------------------------------------- -- Table: londiste.applied_execute -- -- Info about EXECUTE commands that are ran. -- -- Columns: -- queue_name - cascaded queue name -- execute_file - filename / unique id -- execute_time - the time execute happened -- execute_sql - contains SQL for EXECUTE event (informative) -- ---------------------------------------------------------------------- create table londiste.applied_execute ( queue_name text not null, execute_file text not null, execute_time timestamptz not null default now(), execute_sql text not null, execute_attrs text, primary key (execute_file) ); -- ---------------------------------------------------------------------- -- Table: londiste.pending_fkeys -- -- Details on dropped fkeys. Global, not specific to any set. 
-- -- Columns: -- from_table - fully-qualified table name -- to_table - fully-qualified table name -- fkey_name - name of constraint -- fkey_def - full fkey definition -- ---------------------------------------------------------------------- create table londiste.pending_fkeys ( from_table text not null, to_table text not null, fkey_name text not null, fkey_def text not null, primary key (from_table, fkey_name) ); -- Section: Londiste functions -- upgrade schema create or replace function londiste.upgrade_schema() returns int4 as $$ -- updates table structure if necessary declare cnt int4 = 0; begin -- table_info: check (dropped_ddl is null or merge_state in ('in-copy', 'catching-up')) perform 1 from information_schema.check_constraints where constraint_schema = 'londiste' and constraint_name = 'table_info_check' and position('in-copy' in check_clause) > 0 and position('catching' in check_clause) = 0; if found then alter table londiste.table_info drop constraint table_info_check; alter table londiste.table_info add constraint table_info_check check (dropped_ddl is null or merge_state in ('in-copy', 'catching-up')); cnt := cnt + 1; end if; -- table_info.dest_table perform 1 from information_schema.columns where table_schema = 'londiste' and table_name = 'table_info' and column_name = 'dest_table'; if not found then alter table londiste.table_info add column dest_table text; end if; -- applied_execute.dest_table perform 1 from information_schema.columns where table_schema = 'londiste' and table_name = 'applied_execute' and column_name = 'execute_attrs'; if not found then alter table londiste.applied_execute add column execute_attrs text; end if; -- applied_execute: drop queue_name from primary key perform 1 from pg_catalog.pg_indexes where schemaname = 'londiste' and tablename = 'applied_execute' and indexname = 'applied_execute_pkey' and indexdef like '%queue_name%'; if found then alter table londiste.applied_execute drop constraint applied_execute_pkey; alter table 
londiste.applied_execute add constraint applied_execute_pkey primary key (execute_file); end if; -- applied_execute: drop fkey to pgq_node perform 1 from information_schema.table_constraints where constraint_schema = 'londiste' and table_schema = 'londiste' and table_name = 'applied_execute' and constraint_type = 'FOREIGN KEY' and constraint_name = 'applied_execute_queue_name_fkey'; if found then alter table londiste.applied_execute drop constraint applied_execute_queue_name_fkey; end if; -- create roles perform 1 from pg_catalog.pg_roles where rolname = 'londiste_writer'; if not found then create role londiste_writer in role pgq_admin; cnt := cnt + 1; end if; perform 1 from pg_catalog.pg_roles where rolname = 'londiste_reader'; if not found then create role londiste_reader in role pgq_reader; cnt := cnt + 1; end if; return cnt; end; $$ language plpgsql; select londiste.upgrade_schema(); -- Group: Information create or replace function londiste.get_seq_list( in i_queue_name text, out seq_name text, out last_value int8, out local boolean) returns setof record as $$ -- ---------------------------------------------------------------------- -- Function: londiste.get_seq_list(1) -- -- Returns registered seqs on this Londiste node. 
-- -- Result fiels: -- seq_name - fully qualified name of sequence -- last_value - last globally published value -- local - is locally registered -- ---------------------------------------------------------------------- declare rec record; begin for seq_name, last_value, local in select s.seq_name, s.last_value, s.local from londiste.seq_info s where s.queue_name = i_queue_name order by s.nr, s.seq_name loop return next; end loop; return; end; $$ language plpgsql strict; drop function if exists londiste.get_table_list(text); create or replace function londiste.get_table_list( in i_queue_name text, out table_name text, out local boolean, out merge_state text, out custom_snapshot text, out table_attrs text, out dropped_ddl text, out copy_role text, out copy_pos int4, out dest_table text) returns setof record as $$ -- ---------------------------------------------------------------------- -- Function: londiste.get_table_list(1) -- -- Return info about registered tables. -- -- Parameters: -- i_queue_name - cascaded queue name -- -- Returns: -- table_name - fully-quelified table name -- local - does events needs to be applied to local table -- merge_state - show phase of initial copy -- custom_snapshot - remote snapshot of COPY transaction -- table_attrs - urlencoded dict of table attributes -- dropped_ddl - partition combining: temp place to put DDL -- copy_role - partition combining: how to handle copy -- copy_pos - position in parallel copy working order -- -- copy_role = lead: -- on copy start, drop indexes and store in dropped_ddl -- on copy finish change state to catching-up, then wait until copy_role turns to NULL -- catching-up: if dropped_ddl not NULL, restore them -- copy_role = wait-copy: -- on copy start wait, until role changes (to wait-replay) -- copy_role = wait-replay: -- on copy finish, tag as 'catching-up' -- wait until copy_role is NULL, then proceed -- ---------------------------------------------------------------------- begin for table_name, local, 
merge_state, custom_snapshot, table_attrs, dropped_ddl, dest_table in select t.table_name, t.local, t.merge_state, t.custom_snapshot, t.table_attrs, t.dropped_ddl, t.dest_table from londiste.table_info t where t.queue_name = i_queue_name order by t.nr, t.table_name loop copy_role := null; copy_pos := 0; if merge_state in ('in-copy', 'catching-up') then select f.copy_role, f.copy_pos from londiste._coordinate_copy(i_queue_name, table_name) f into copy_role, copy_pos; end if; return next; end loop; return; end; $$ language plpgsql strict stable; create or replace function londiste._coordinate_copy( in i_queue_name text, in i_table_name text, out copy_role text, out copy_pos int4) as $$ -- if the table is in middle of copy from multiple partitions, -- the copy processes need coordination. declare q_part1 text; q_part_ddl text; n_parts int4; n_done int4; _table_name text; n_combined_queue text; merge_state text; dest_table text; dropped_ddl text; begin copy_pos := 0; copy_role := null; select t.merge_state, t.dest_table, t.dropped_ddl, min(case when t2.local then t2.queue_name else null end) as _queue1, min(case when t2.local and t2.dropped_ddl is not null then t2.queue_name else null end) as _queue1ddl, count(case when t2.local then t2.table_name else null end) as _total, count(case when t2.local then nullif(t2.merge_state, 'in-copy') else null end) as _done, min(n.combined_queue) as _combined_queue, count(nullif(t2.queue_name < i_queue_name and t.merge_state = 'in-copy' and t2.merge_state = 'in-copy', false)) as _copy_pos from londiste.table_info t join pgq_node.node_info n on (n.queue_name = t.queue_name) left join pgq_node.node_info n2 on (n2.combined_queue = n.combined_queue or (n2.combined_queue is null and n.combined_queue is null)) left join londiste.table_info t2 on (coalesce(t2.dest_table, t2.table_name) = coalesce(t.dest_table, t.table_name) and t2.queue_name = n2.queue_name and (t2.merge_state is null or t2.merge_state != 'ok')) where t.queue_name = 
i_queue_name and t.table_name = i_table_name group by t.nr, t.table_name, t.local, t.merge_state, t.custom_snapshot, t.table_attrs, t.dropped_ddl, t.dest_table into merge_state, dest_table, dropped_ddl, q_part1, q_part_ddl, n_parts, n_done, n_combined_queue, copy_pos; -- q_part1, q_part_ddl, n_parts, n_done, n_combined_queue, copy_pos, dest_table -- be more robust against late joiners q_part1 := coalesce(q_part_ddl, q_part1); -- turn the logic off if no merge is happening if n_parts = 1 then q_part1 := null; end if; if q_part1 is not null then if i_queue_name = q_part1 then -- lead if merge_state = 'in-copy' then if dropped_ddl is null and n_done > 0 then -- seems late addition, let it copy with indexes copy_role := 'wait-replay'; elsif n_done < n_parts then -- show copy_role only if need to drop ddl or already did drop ddl copy_role := 'lead'; end if; -- make sure it cannot be made to wait copy_pos := 0; end if; if merge_state = 'catching-up' and dropped_ddl is not null then -- show copy_role only if need to wait for others if n_done < n_parts then copy_role := 'wait-replay'; end if; end if; else -- follow if merge_state = 'in-copy' then if q_part_ddl is not null then -- can copy, wait in replay until lead has applied ddl copy_role := 'wait-replay'; elsif n_done > 0 then -- ddl is not dropped, others are active, copy without touching ddl copy_role := 'wait-replay'; else -- wait for lead to drop ddl copy_role := 'wait-copy'; end if; elsif merge_state = 'catching-up' then -- show copy_role only if need to wait for lead if q_part_ddl is not null then copy_role := 'wait-replay'; end if; end if; end if; end if; return; end; $$ language plpgsql strict stable; create or replace function londiste.local_show_missing( in i_queue_name text, out obj_kind text, out obj_name text) returns setof record as $$ -- ---------------------------------------------------------------------- -- Function: londiste.local_show_missing(1) -- -- Return info about missing tables. 
On root show tables -- not registered on set, on branch/leaf show tables -- in set but not registered locally. -- ---------------------------------------------------------------------- begin if pgq_node.is_root_node(i_queue_name) then for obj_kind, obj_name in select r.relkind, n.nspname || '.' || r.relname from pg_catalog.pg_class r, pg_catalog.pg_namespace n where n.oid = r.relnamespace and r.relkind in ('r', 'S') and n.nspname not in ('pgq', 'pgq_ext', 'pgq_node', 'londiste', 'pg_catalog', 'information_schema') and n.nspname !~ '^pg_(toast|temp)' and not exists (select 1 from londiste.table_info where queue_name = i_queue_name and local and coalesce(dest_table, table_name) = (n.nspname || '.' || r.relname)) order by 1, 2 loop return next; end loop; else for obj_kind, obj_name in select 'S', s.seq_name from londiste.seq_info s where s.queue_name = i_queue_name and not s.local union all select 'r', t.table_name from londiste.table_info t where t.queue_name = i_queue_name and not t.local order by 1, 2 loop return next; end loop; end if; return; end; $$ language plpgsql strict stable; -- Group: Local object registration (setup tool) create or replace function londiste.local_add_seq( in i_queue_name text, in i_seq_name text, out ret_code int4, out ret_note text) as $$ -- ---------------------------------------------------------------------- -- Function: londiste.local_add_seq(2) -- -- Register sequence. 
--
-- Parameters:
--      i_queue_name - cascaded queue name
--      i_seq_name - seq name
--
-- Returns:
--      200 - OK
--      400 - Not found
-- ----------------------------------------------------------------------
declare
    fq_seq_name text;
    lastval int8;
    seq record;
begin
    fq_seq_name := londiste.make_fqname(i_seq_name);
    -- the sequence must exist in this database
    perform 1 from pg_class
        where oid = londiste.find_seq_oid(fq_seq_name);
    if not found then
        select 400, 'Sequence not found: ' || fq_seq_name into ret_code, ret_note;
        return;
    end if;

    if pgq_node.is_root_node(i_queue_name) then
        -- row lock serializes parallel add/remove of the same sequence
        select local, last_value into seq
            from londiste.seq_info
            where queue_name = i_queue_name
                and seq_name = fq_seq_name
            for update;
        if found and seq.local then
            select 201, 'Sequence already added: ' || fq_seq_name
                into ret_code, ret_note;
            return;
        end if;
        -- NOTE(review): when no row was found, seq.local is NULL, so
        -- "not seq.local" is also NULL and control falls to the insert
        -- branch -- this relies on SQL three-valued logic.
        if not seq.local then
            update londiste.seq_info set local = true
                where queue_name = i_queue_name and seq_name = fq_seq_name;
        else
            insert into londiste.seq_info (queue_name, seq_name, local, last_value)
            values (i_queue_name, fq_seq_name, true, 0);
        end if;
        -- publish current sequence values into the queue
        perform * from londiste.root_check_seqs(i_queue_name);
    else
        -- non-root: sequence must already be registered on the queue
        select local, last_value into seq
            from londiste.seq_info
            where queue_name = i_queue_name
                and seq_name = fq_seq_name
            for update;
        if not found then
            select 404, 'Unknown sequence: ' || fq_seq_name
                into ret_code, ret_note;
            return;
        end if;
        if seq.local then
            select 201, 'Sequence already added: ' || fq_seq_name
                into ret_code, ret_note;
            return;
        end if;
        update londiste.seq_info set local = true
            where queue_name = i_queue_name and seq_name = fq_seq_name;
        -- sync the local sequence with the last published value
        perform pgq.seq_setval(fq_seq_name, seq.last_value);
    end if;

    select 200, 'Sequence added: ' || fq_seq_name into ret_code, ret_note;
    return;
end;
$$ language plpgsql;


create or replace function londiste.local_add_table(
    in i_queue_name     text,
    in i_table_name     text,
    in i_trg_args       text[],
    in i_table_attrs    text,
    in i_dest_table     text,
    out ret_code        int4,
    out ret_note        text)
as $$
-- ----------------------------------------------------------------------
-- Function: londiste.local_add_table(5)
--
--      Register table on Londiste node, with customizable trigger args.
--
-- Parameters:
--      i_queue_name    - queue name
--      i_table_name    - table name
--      i_trg_args      - args to trigger, or magic parameters.
--      i_table_attrs   - args to python handler
--      i_dest_table    - actual name of destination table (NULL if same)
--
-- Trigger args:
--      See documentation for pgq triggers.
--
-- Magic parameters:
--      no_triggers     - skip trigger creation
--      skip_truncate   - set 'skip_truncate' table attribute
--      expect_sync     - set table state to 'ok'
--      tgflags=X       - trigger creation flags
--      merge_all       - merge table from all sources.  required for
--                        multi-source table
--      no_merge        - do not merge tables from different sources
--      skip            - create skip trigger. same as S flag
--      virtual_table   - skips structure check and trigger creation
--
-- Trigger creation flags (default: AIUDL):
--      I - ON INSERT
--      U - ON UPDATE
--      D - ON DELETE
--      Q - use pgq.sqltriga() as trigger function
--      L - use pgq.logutriga() as trigger function
--      B - BEFORE
--      A - AFTER
--      S - SKIP
--
-- Example:
--      > londiste.local_add_table('q', 'tbl', array['tgflags=BI', 'SKIP', 'pkey=col1,col2'])
--
-- Returns:
--      200 - Ok
--      301 - Warning, trigger exists that will fire before londiste one
--      400 - No such set
------------------------------------------------------------------------
declare
    col_types text;
    fq_table_name text;
    new_state text;
    trunctrg_name text;
    pgversion int;
    logtrg_previous text;
    lg_name text;
    lg_func text;
    lg_pos text;
    lg_event text;
    lg_args text;
    _extra_args text;
    tbl record;
    i integer;
    j integer;
    sql text;
    arg text;
    _node record;
    _tbloid oid;
    _combined_queue text;
    _combined_table text;
    -- skip trigger
    _skip_prefix text := 'zzz_';
    _skip_trg_count integer;
    _skip_trg_name text;
    -- check local tables from all sources
    _queue_name text;
    _local boolean;
    -- array with all tgflags values
    _check_flags char[] := array['B','A','Q','L','I','U','D','S'];
    -- given tgflags array
    _tgflags char[];
    -- ordinary argument array
    _args text[];
    -- argument flags
    _expect_sync boolean := false;
    _merge_all boolean := false;
    _no_merge boolean := false;
    _skip_truncate boolean := false;
    _no_triggers boolean := false;
    _skip boolean := false;
    _virtual_table boolean := false;
    _dest_table text;
    _got_extra1 boolean := false;
    _table_name2 text;
    _desc text;
begin

    -------- i_trg_args ARGUMENTS PARSING

    if array_lower(i_trg_args, 1) is not null then
        for i in array_lower(i_trg_args, 1) .. array_upper(i_trg_args, 1) loop
            arg := i_trg_args[i];
            if arg like 'tgflags=%' then
                -- special flag handling
                arg := upper(substr(arg, 9));
                for j in array_lower(_check_flags, 1) .. array_upper(_check_flags, 1) loop
                    if position(_check_flags[j] in arg) > 0 then
                        _tgflags := array_append(_tgflags, _check_flags[j]);
                    end if;
                end loop;
            elsif arg = 'expect_sync' then
                _expect_sync := true;
            elsif arg = 'skip_truncate' then
                _skip_truncate := true;
            elsif arg = 'no_triggers' then
                _no_triggers := true;
            elsif arg = 'merge_all' then
                _merge_all = true;
            elsif arg = 'no_merge' then
                _no_merge = true;
            elsif lower(arg) = 'skip' then
                _skip := true;
            elsif arg = 'virtual_table' then
                _virtual_table := true;
                _expect_sync := true;   -- do not copy
                _no_triggers := true;   -- do not create triggers
            else
                if arg like 'ev_extra1=%' then
                    _got_extra1 := true;
                end if;
                -- ordinary arg
                _args = array_append(_args, quote_literal(arg));
            end if;
        end loop;
    end if;

    if _merge_all and _no_merge then
        select 405, 'Cannot use merge-all and no-merge together' into ret_code, ret_note;
        return;
    end if;

    fq_table_name := londiste.make_fqname(i_table_name);
    _dest_table := londiste.make_fqname(coalesce(i_dest_table, i_table_name));

    if _dest_table <> fq_table_name and not _got_extra1 then
        -- if renamed table, enforce trigger to put
        -- global table name into extra1
        arg := 'ev_extra1=' || quote_literal(fq_table_name);
        _args := array_append(_args, quote_literal(arg));
    end if;

    -- _desc is the human-readable table name used in all return notes
    if _dest_table = fq_table_name then
        _desc := fq_table_name;
    else
        _desc :=
fq_table_name || '(' || _dest_table || ')';
    end if;

    -------- TABLE STRUCTURE CHECK

    if not _virtual_table then
        _tbloid := londiste.find_table_oid(_dest_table);
        if _tbloid is null then
            select 404, 'Table does not exist: ' || _desc into ret_code, ret_note;
            return;
        end if;
        col_types := londiste.find_column_types(_dest_table);
        if position('k' in col_types) < 1 then
            -- allow missing primary key in case of combined table where
            -- pkey was removed by londiste
            perform 1 from londiste.table_info t,
                pgq_node.node_info n_this,
                pgq_node.node_info n_other
              where n_this.queue_name = i_queue_name
                and n_other.combined_queue = n_this.combined_queue
                and n_other.queue_name <> n_this.queue_name
                and t.queue_name = n_other.queue_name
                and coalesce(t.dest_table, t.table_name) = _dest_table
                and t.dropped_ddl is not null;
            if not found then
                select 400, 'Primary key missing on table: ' || _desc into ret_code, ret_note;
                return;
            end if;
        end if;
    end if;

    -------- TABLE REGISTRATION LOGIC

    select * from pgq_node.get_node_info(i_queue_name) into _node;
    if not found or _node.ret_code >= 400 then
        select 400, 'No such set: ' || i_queue_name into ret_code, ret_note;
        return;
    end if;

    select merge_state, local into tbl
        from londiste.table_info
        where queue_name = i_queue_name and table_name = fq_table_name;
    if not found then
        -- add to set on root
        if _node.node_type = 'root' then
            select f.ret_code, f.ret_note into ret_code, ret_note
                from londiste.global_add_table(i_queue_name, i_table_name) f;
            if ret_code <> 200 then
                return;
            end if;
        else
            select 404, 'Table not available on queue: ' || _desc
                into ret_code, ret_note;
            return;
        end if;

        -- reload info
        select merge_state, local into tbl
            from londiste.table_info
            where queue_name = i_queue_name and table_name = fq_table_name;
    end if;

    if tbl.local then
        select 200, 'Table already added: ' || _desc into ret_code, ret_note;
        return;
    end if;

    -- pick initial merge_state: root and combined-branch leaves are
    -- considered in sync immediately, otherwise only when caller says so
    if _node.node_type = 'root' then
        new_state := 'ok';
        perform londiste.root_notify_change(i_queue_name, 'londiste.add-table',
fq_table_name);
    elsif _node.node_type = 'leaf' and _node.combined_type = 'branch' then
        new_state := 'ok';
    elsif _expect_sync then
        new_state := 'ok';
    else
        new_state := NULL;
    end if;

    update londiste.table_info
        set local = true,
            merge_state = new_state,
            table_attrs = coalesce(i_table_attrs, table_attrs),
            dest_table = nullif(_dest_table, fq_table_name)
        where queue_name = i_queue_name and table_name = fq_table_name;
    if not found then
        raise exception 'lost table: %', fq_table_name;
    end if;

    -- merge all table sources on leaf
    if _node.node_type = 'leaf' and not _no_merge then
        -- find the same target table under all other queues that share
        -- this node's combined queue
        for _queue_name, _table_name2, _local in
            select t2.queue_name, t2.table_name, t2.local
            from londiste.table_info t
            join pgq_node.node_info n on (n.queue_name = t.queue_name)
            left join pgq_node.node_info n2 on (n2.combined_queue = n.combined_queue or
                    (n2.combined_queue is null and n.combined_queue is null))
            left join londiste.table_info t2
               on (t2.queue_name = n2.queue_name and
                   coalesce(t2.dest_table, t2.table_name) = coalesce(t.dest_table, t.table_name))
            where t.queue_name = i_queue_name
              and t.table_name = fq_table_name
              and t2.queue_name != i_queue_name -- skip self
        loop
            -- if table from some other source is already marked as local,
            -- raise error
            if _local and coalesce(new_state, 'x') <> 'ok' then
                select 405, 'Found local table '|| _desc
                        || ' in queue ' || _queue_name
                        || ', use remove-table first to remove all previous '
                        || 'table subscriptions'
                    into ret_code, ret_note;
                return;
            end if;

            -- when table comes from multiple sources, merge_all switch is
            -- required
            if not _merge_all and coalesce(new_state, 'x') <> 'ok' then
                select 405, 'Found multiple sources for table '|| _desc
                        || ', use merge-all or no-merge to continue'
                    into ret_code, ret_note;
                return;
            end if;

            update londiste.table_info
               set local = true,
                   merge_state = new_state,
                   table_attrs = coalesce(i_table_attrs, table_attrs)
               where queue_name = _queue_name
                 and table_name = _table_name2;
            if not found then
                raise exception 'lost table: % on queue %',
_table_name2, _queue_name;
            end if;
        end loop;

        -- if this node has combined_queue, add table there too
        -- note: we need to keep both table_name/dest_table values
        select n2.queue_name, t.table_name
            from pgq_node.node_info n1
            join pgq_node.node_info n2
                on (n2.queue_name = n1.combined_queue)
            left join londiste.table_info t
                on (t.queue_name = n2.queue_name and t.table_name = fq_table_name and t.local)
            where n1.queue_name = i_queue_name and n2.node_type = 'root'
            into _combined_queue, _combined_table;
        if found and _combined_table is null then
            -- recurse into the combined root queue with the same args
            select f.ret_code, f.ret_note
                from londiste.local_add_table(_combined_queue, fq_table_name, i_trg_args, i_table_attrs, _dest_table) f
                into ret_code, ret_note;
            if ret_code >= 300 then
                return;
            end if;
        end if;
    end if;

    if _skip_truncate then
        perform 1
        from londiste.local_set_table_attrs(i_queue_name, fq_table_name,
            coalesce(i_table_attrs || '&skip_truncate=1', 'skip_truncate=1'));
    end if;

    -------- TRIGGER LOGIC

    -- new trigger
    _extra_args := '';
    lg_name := '_londiste_' || i_queue_name;
    lg_func := 'pgq.logutriga';
    lg_event := '';
    lg_args := quote_literal(i_queue_name);
    lg_pos := 'after';

    if array_lower(_args, 1) is not null then
        lg_args := lg_args || ', ' || array_to_string(_args, ', ');
    end if;

    -- apply the tgflags collected during argument parsing
    if 'B' = any(_tgflags) then
        lg_pos := 'before';
    end if;
    if 'A' = any(_tgflags) then
        lg_pos := 'after';
    end if;
    if 'Q' = any(_tgflags) then
        lg_func := 'pgq.sqltriga';
    end if;
    if 'L' = any(_tgflags) then
        lg_func := 'pgq.logutriga';
    end if;
    if 'I' = any(_tgflags) then
        lg_event := lg_event || ' or insert';
    end if;
    if 'U' = any(_tgflags) then
        lg_event := lg_event || ' or update';
    end if;
    if 'D' = any(_tgflags) then
        lg_event := lg_event || ' or delete';
    end if;
    if 'S' = any(_tgflags) then
        _skip := true;
    end if;

    if _node.node_type = 'leaf' then
        -- on weird leafs the trigger funcs may not exist
        perform 1 from pg_proc p join pg_namespace n on (n.oid = p.pronamespace)
            where n.nspname = 'pgq' and p.proname in ('logutriga', 'sqltriga');
        if not found then
            select 200, 'Table added with no triggers: ' || _desc into ret_code, ret_note;
            return;
        end if;
        -- on regular leaf, install deny trigger
        _extra_args := ', ' || quote_literal('deny');
    end if;

    -- if skip param given, rename previous skip triggers and prefix current
    if _skip then
        -- get count and name of existing skip triggers
        select count(*), min(t.tgname)
            into _skip_trg_count, _skip_trg_name
            from pg_catalog.pg_trigger t
            where t.tgrelid = londiste.find_table_oid(_dest_table)
                and position(E'\\000skip\\000' in lower(tgargs::text)) > 0;
        -- if no previous skip triggers, prefix name and add SKIP to args
        if _skip_trg_count = 0 then
            lg_name := _skip_prefix || lg_name;
            lg_args := lg_args || ', ' || quote_literal('SKIP');
        -- if one previous skip trigger, check its prefix and
        -- do not use SKIP on current trigger
        elsif _skip_trg_count = 1 then
            -- if not prefixed then rename
            if position(_skip_prefix in _skip_trg_name) != 1 then
                sql := 'alter trigger ' || _skip_trg_name
                    || ' on ' || londiste.quote_fqname(_dest_table)
                    || ' rename to ' || _skip_prefix || _skip_trg_name;
                execute sql;
            end if;
        else
            select 405, 'Multiple SKIP triggers in table: ' || _desc
                into ret_code, ret_note;
            return;
        end if;
    end if;

    -- create Ins/Upd/Del trigger if it does not exist already
    perform 1 from pg_catalog.pg_trigger
        where tgrelid = londiste.find_table_oid(_dest_table)
            and tgname = lg_name;
    if not found then

        if _no_triggers then
            select 200, 'Table added with no triggers: ' || _desc
                into ret_code, ret_note;
            return;
        end if;

        -- finalize event; strip the leading ' or ' built up above
        lg_event := substr(lg_event, 4);
        if lg_event = '' then
            lg_event := 'insert or update or delete';
        end if;

        -- create trigger
        sql := 'create trigger ' || quote_ident(lg_name)
            || ' ' || lg_pos || ' ' || lg_event
            || ' on ' || londiste.quote_fqname(_dest_table)
            || ' for each row execute procedure '
            || lg_func || '(' || lg_args || _extra_args || ')';
        execute sql;
    end if;

    -- create truncate trigger if it does not exist already
    -- (truncate triggers appeared in PostgreSQL 8.4)
    show server_version_num into pgversion;
    if pgversion >= 80400 then
        trunctrg_name  := '_londiste_' || i_queue_name || '_truncate';
        perform 1 from pg_catalog.pg_trigger
          where tgrelid = londiste.find_table_oid(_dest_table)
            and tgname = trunctrg_name;
        if not found then
            sql := 'create trigger ' || quote_ident(trunctrg_name)
                || ' after truncate on ' || londiste.quote_fqname(_dest_table)
                || ' for each statement execute procedure pgq.sqltriga('
                || quote_literal(i_queue_name) || _extra_args || ')';
            execute sql;
        end if;
    end if;

    -- Check that no trigger exists on the target table that will get fired
    -- before londiste one (this could have londiste replicate data
    -- out-of-order
    --
    -- Don't report all the trigger names, 8.3 does not have array_accum
    -- available
    -- (9.0+ has tgisinternal; older versions use tgisconstraint instead)
    if pgversion >= 90000 then
        select tg.tgname into logtrg_previous
        from pg_class r join pg_trigger tg on (tg.tgrelid = r.oid)
        where r.oid = londiste.find_table_oid(_dest_table)
          and not tg.tgisinternal
          and tg.tgname < lg_name::name
          -- per-row AFTER trigger
          and (tg.tgtype & 3) = 1   -- bits: 0:ROW, 1:BEFORE
          -- current londiste
          and not londiste.is_replica_func(tg.tgfoid)
          -- old londiste
          and substring(tg.tgname from 1 for 10) != '_londiste_'
          and substring(tg.tgname from char_length(tg.tgname) - 6) != '_logger'
        order by 1 limit 1;
    else
        select tg.tgname into logtrg_previous
        from pg_class r join pg_trigger tg on (tg.tgrelid = r.oid)
        where r.oid = londiste.find_table_oid(_dest_table)
          and not tg.tgisconstraint
          and tg.tgname < lg_name::name
          -- per-row AFTER trigger
          and (tg.tgtype & 3) = 1   -- bits: 0:ROW, 1:BEFORE
          -- current londiste
          and not londiste.is_replica_func(tg.tgfoid)
          -- old londiste
          and substring(tg.tgname from 1 for 10) != '_londiste_'
          and substring(tg.tgname from char_length(tg.tgname) - 6) != '_logger'
        order by 1 limit 1;
    end if;

    if logtrg_previous is not null then
       select 301,
              'Table added: ' || _desc
                              || ', but londiste trigger is not first: '
                              || logtrg_previous
         into ret_code, ret_note;
        return;
    end if;

    select 200, 'Table added: ' || _desc into ret_code, ret_note;
    return;
end;
$$ language plpgsql;

create or replace function londiste.local_add_table(
    in i_queue_name     text,
    in i_table_name     text,
    in i_trg_args       text[],
    in i_table_attrs    text,
    out ret_code        int4,
    out ret_note        text)
as $$
-- ----------------------------------------------------------------------
-- Function: londiste.local_add_table(4)
--
--      Register table on Londiste node.
--      Convenience wrapper: delegates to local_add_table(5)
--      with i_dest_table = NULL (destination name same as source).
-- ----------------------------------------------------------------------
begin
    select f.ret_code, f.ret_note into ret_code, ret_note
      from londiste.local_add_table(i_queue_name, i_table_name, i_trg_args, i_table_attrs, null) f;
    return;
end;
$$ language plpgsql;

create or replace function londiste.local_add_table(
    in i_queue_name     text,
    in i_table_name     text,
    in i_trg_args       text[],
    out ret_code        int4,
    out ret_note        text)
as $$
-- ----------------------------------------------------------------------
-- Function: londiste.local_add_table(3)
--
--      Register table on Londiste node.
--      Convenience wrapper: delegates to local_add_table(4)
--      with i_table_attrs = NULL.
-- ----------------------------------------------------------------------
begin
    select f.ret_code, f.ret_note into ret_code, ret_note
      from londiste.local_add_table(i_queue_name, i_table_name, i_trg_args, null) f;
    return;
end;
$$ language plpgsql;

create or replace function londiste.local_add_table(
    in i_queue_name     text,
    in i_table_name     text,
    out ret_code        int4,
    out ret_note        text)
as $$
-- ----------------------------------------------------------------------
-- Function: londiste.local_add_table(2)
--
--      Register table on Londiste node.
--      Convenience wrapper: delegates to local_add_table(3)
--      with i_trg_args = NULL (default trigger setup).
-- ----------------------------------------------------------------------
begin
    select f.ret_code, f.ret_note into ret_code, ret_note
      from londiste.local_add_table(i_queue_name, i_table_name, null) f;
    return;
end;
$$ language plpgsql strict;

create or replace function londiste.local_remove_seq(
    in i_queue_name     text,
    in i_seq_name       text,
    out ret_code        int4,
    out ret_note        text)
as $$
-- ----------------------------------------------------------------------
-- Function: londiste.local_remove_seq(2)
--
--      Remove sequence.
--
-- Parameters:
--      i_queue_name - set name
--      i_seq_name - sequence name
--
-- Returns:
--      200 - OK
--      404 - Sequence not found
-- ----------------------------------------------------------------------
declare
    fqname text;
begin
    fqname := londiste.make_fqname(i_seq_name);
    if pgq_node.is_root_node(i_queue_name) then
        -- on root the removal is global: propagated to the whole set
        select f.ret_code, f.ret_note
            into ret_code, ret_note
            from londiste.global_remove_seq(i_queue_name, fqname) f;
        return;
    end if;
    -- on non-root only the local subscription flag is cleared
    update londiste.seq_info
        set local = false
        where queue_name = i_queue_name
            and seq_name = fqname
            and local;
    if not found then
        select 404, 'Sequence not found: '||fqname into ret_code, ret_note;
        return;
    end if;

    select 200, 'Sequence removed: '||fqname into ret_code, ret_note;
    return;
end;
$$ language plpgsql strict;

create or replace function londiste.local_remove_table(
    in i_queue_name text, in i_table_name text,
    out ret_code int4, out ret_note text)
as $$
-- ----------------------------------------------------------------------
-- Function: londiste.local_remove_table(2)
--
--      Remove table.
--
-- Parameters:
--      i_queue_name - set name
--      i_table_name - table name
--
-- Returns:
--      200 - OK
--      404 - Table not found
--            (NOTE(review): code below actually returns 400 for a
--             missing table -- confirm which code callers expect)
-- ----------------------------------------------------------------------
declare
    fq_table_name   text;
    qtbl            text;
    seqname         text;
    tbl             record;
    tbl_oid         oid;
    pgver           integer;
begin
    fq_table_name := londiste.make_fqname(i_table_name);
    qtbl := londiste.quote_fqname(fq_table_name);
    tbl_oid := londiste.find_table_oid(i_table_name);
    show server_version_num into pgver;

    -- row lock serializes concurrent add/remove of the same table
    select local, dropped_ddl, merge_state into tbl
        from londiste.table_info
        where queue_name = i_queue_name
            and table_name = fq_table_name
        for update;
    if not found then
        select 400, 'Table not found: ' || fq_table_name into ret_code, ret_note;
        return;
    end if;

    if tbl.local then
        perform londiste.drop_table_triggers(i_queue_name, fq_table_name);

        -- restore dropped ddl
        if tbl.dropped_ddl is not null then
            -- table is not synced, drop data to make restore faster
            if pgver >= 80400 then
                execute 'TRUNCATE ONLY ' || qtbl;
            else
                execute 'TRUNCATE ' || qtbl;
            end if;
            execute tbl.dropped_ddl;
        end if;

        -- reset data
        update londiste.table_info
            set local = false,
                custom_snapshot = null,
                table_attrs = null,
                dropped_ddl = null,
                merge_state = null,
                dest_table = null
            where queue_name = i_queue_name
                and table_name = fq_table_name;

        -- drop dependent sequence
        -- (any serial/bigserial column owned by this table)
        for seqname in
            select n.nspname || '.' || s.relname
                from pg_catalog.pg_class s,
                     pg_catalog.pg_namespace n,
                     pg_catalog.pg_attribute a
                where a.attrelid = tbl_oid
                    and a.atthasdef
                    and a.atttypid::regtype::text in ('integer', 'bigint')
                    and s.oid = pg_get_serial_sequence(qtbl, a.attname)::regclass::oid
                    and n.oid = s.relnamespace
        loop
            perform londiste.local_remove_seq(i_queue_name, seqname);
        end loop;
    else
        if not pgq_node.is_root_node(i_queue_name) then
            select 400, 'Table not registered locally: ' || fq_table_name into ret_code, ret_note;
            return;
        end if;
    end if;

    if pgq_node.is_root_node(i_queue_name) then
        -- root removes the table globally and tells the subscribers
        perform londiste.global_remove_table(i_queue_name, fq_table_name);
        perform londiste.root_notify_change(i_queue_name, 'londiste.remove-table', fq_table_name);
    end if;

    select 200, 'Table removed: ' || fq_table_name into ret_code, ret_note;
    return;
end;
$$ language plpgsql strict;

-- Group: Global object registrations (internal)

create or replace function londiste.global_add_table(
    in i_queue_name     text,
    in i_table_name     text,
    out ret_code        int4,
    out ret_note        text)
as $$
-- ----------------------------------------------------------------------
-- Function: londiste.global_add_table(2)
--
--      Register table on Londiste set.
--
--      This means its available from root, events for it appear
--      in queue and nodes can attach to it.
--
-- Called by:
--      on root - londiste.local_add_table()
--      elsewhere - londiste consumer when receives new table event
--
-- Returns:
--      200 - Ok
--      400 - No such set
-- ----------------------------------------------------------------------
declare
    fq_table_name text;
    _cqueue text;
begin
    fq_table_name := londiste.make_fqname(i_table_name);

    -- row lock on the node serializes parallel registrations
    select combined_queue into _cqueue
        from pgq_node.node_info
        where queue_name = i_queue_name
        for update;
    if not found then
        select 400, 'No such queue: ' || i_queue_name into ret_code, ret_note;
        return;
    end if;

    perform 1 from londiste.table_info
        where queue_name = i_queue_name
            and table_name = fq_table_name;
    if found then
        select 200, 'Table already added: ' || fq_table_name into ret_code, ret_note;
        return;
    end if;

    insert into londiste.table_info (queue_name, table_name)
        values (i_queue_name, fq_table_name);
    select 200, 'Table added: ' || i_table_name
        into ret_code, ret_note;

    -- let the combined node know about it too
    if _cqueue is not null then
        perform londiste.global_add_table(_cqueue, i_table_name);
    end if;

    return;
exception
    -- seems the row was added from parallel connection (setup vs. replay)
    when unique_violation then
        select 200, 'Table already added: ' || i_table_name
            into ret_code, ret_note;
        return;
end;
$$ language plpgsql strict;

create or replace function londiste.global_remove_table(
    in i_queue_name     text,
    in i_table_name     text,
    out ret_code        int4,
    out ret_note        text)
as $$
-- ----------------------------------------------------------------------
-- Function: londiste.global_remove_table(2)
--
--      Removes tables registration in set.
--
--      Means that nodes cannot attach to this table anymore.
--
-- Called by:
--      - On root by londiste.local_remove_table()
--      - Elsewhere by consumer receiving table remove event
--
-- Returns:
--      200 - OK
--      400 - not found
-- ----------------------------------------------------------------------
declare
    fq_table_name text;
begin
    fq_table_name := londiste.make_fqname(i_table_name);
    if not pgq_node.is_root_node(i_queue_name) then
        -- drop the local subscription before the global registration
        perform londiste.local_remove_table(i_queue_name, fq_table_name);
    end if;
    delete from londiste.table_info
        where queue_name = i_queue_name
            and table_name = fq_table_name;
    if not found then
        select 400, 'Table not found: ' || fq_table_name
            into ret_code, ret_note;
        return;
    end if;
    select 200, 'Table removed: ' || i_table_name
        into ret_code, ret_note;
    return;
end;
$$ language plpgsql strict;

create or replace function londiste.global_update_seq(
    in i_queue_name text, in i_seq_name text, in i_value int8,
    out ret_code int4, out ret_note text)
as $$
-- ----------------------------------------------------------------------
-- Function: londiste.global_update_seq(3)
--
--      Update seq.
--
-- Parameters:
--      i_queue_name - set name
--      i_seq_name - seq name
--      i_value - new published value
--
-- Returns:
--      200 - OK
--      402 - called on root node
--      404 - set not found
-- ----------------------------------------------------------------------
declare
    n record;
    fqname text;
    seq record;
begin
    select node_type, node_name into n
        from pgq_node.node_info
        where queue_name = i_queue_name;
    if not found then
        select 404, 'Set not found: ' || i_queue_name into ret_code, ret_note;
        return;
    end if;
    if n.node_type = 'root' then
        select 402, 'Must not run on root node' into ret_code, ret_note;
        return;
    end if;

    fqname := londiste.make_fqname(i_seq_name);
    select last_value, local from londiste.seq_info
        into seq
        where queue_name = i_queue_name and seq_name = fqname
        for update;
    if not found then
        insert into londiste.seq_info
            (queue_name, seq_name, last_value)
        values (i_queue_name, fqname, i_value);
    else
        update londiste.seq_info
            set last_value = i_value
            where queue_name = i_queue_name and seq_name = fqname;
        -- only locally-subscribed sequences get the value applied
        if seq.local then
            perform pgq.seq_setval(fqname, i_value);
        end if;
    end if;

    select 200, 'Sequence updated' into ret_code, ret_note;
    return;
end;
$$ language plpgsql;

create or replace function londiste.global_remove_seq(
    in i_queue_name     text,
    in i_seq_name       text,
    out ret_code        int4,
    out ret_note        text)
as $$
-- ----------------------------------------------------------------------
-- Function: londiste.global_remove_seq(2)
--
--      Removes sequence registration in set.
--
-- Called by:
--      - On root by londiste.local_remove_seq()
--      - Elsewhere by consumer receiving seq remove event
--
-- Returns:
--      200 - OK
--      400 - not found
-- ----------------------------------------------------------------------
declare
    fq_name text;
begin
    fq_name := londiste.make_fqname(i_seq_name);
    delete from londiste.seq_info
        where queue_name = i_queue_name
            and seq_name = fq_name;
    if not found then
        select 400, 'Sequence not found: '||fq_name into ret_code, ret_note;
        return;
    end if;

    if pgq_node.is_root_node(i_queue_name) then
        -- tell subscribers to drop it too
        perform londiste.root_notify_change(i_queue_name, 'londiste.remove-seq', fq_name);
    end if;

    select 200, 'Sequence removed: '||fq_name into ret_code, ret_note;
    return;
end;
$$ language plpgsql strict;

-- Group: FKey handling

create or replace function londiste.get_table_pending_fkeys(i_table_name text)
returns setof londiste.pending_fkeys as $$
-- ----------------------------------------------------------------------
-- Function: londiste.get_table_pending_fkeys(1)
--
--      Return dropped fkeys for table.
--
-- Parameters:
--      i_table_name - fqname
--
-- Returns:
--      desc
-- ----------------------------------------------------------------------
declare
    fkeys record;
begin
    -- both referencing and referenced side count
    for fkeys in
        select *
        from londiste.pending_fkeys
        where from_table = i_table_name or to_table = i_table_name
        order by 1,2,3
    loop
        return next fkeys;
    end loop;
    return;
end;
$$ language plpgsql strict stable;


create or replace function londiste.get_valid_pending_fkeys(i_queue_name text)
returns setof londiste.pending_fkeys as $$
-- ----------------------------------------------------------------------
-- Function: londiste.get_valid_pending_fkeys(1)
--
--      Returns dropped fkeys where both sides are in sync now.
--
-- Parameters:
--      i_queue_name - cascaded queue name
--
-- Returns:
--      desc
-- ----------------------------------------------------------------------
declare
    fkeys record;
begin
    for fkeys in
        select pf.*
        from londiste.pending_fkeys pf
        order by 1, 2, 3
    loop
        -- referencing table must be fully synced ('ok', no copy snapshot)
        perform 1
            from londiste.table_info st_from
            where coalesce(st_from.dest_table, st_from.table_name) = fkeys.from_table
                and st_from.merge_state = 'ok'
                and st_from.custom_snapshot is null
                and st_from.queue_name = i_queue_name;
        if not found then
            continue;
        end if;
        -- referenced table must be fully synced too
        perform 1
            from londiste.table_info st_to
            where coalesce(st_to.dest_table, st_to.table_name) = fkeys.to_table
                and st_to.merge_state = 'ok'
                and st_to.custom_snapshot is null
                and st_to.queue_name = i_queue_name;
        if not found then
            continue;
        end if;
        return next fkeys;
    end loop;

    return;
end;
$$ language plpgsql strict stable;


create or replace function londiste.drop_table_fkey(i_from_table text, i_fkey_name text)
returns integer as $$
-- ----------------------------------------------------------------------
-- Function: londiste.drop_table_fkey(2)
--
--      Drop one fkey, save in pending table.
--      Returns 1 when the fkey was dropped, 0 when it was not found.
-- ----------------------------------------------------------------------
declare
    fkey record;
begin
    select * into fkey
    from londiste.find_table_fkeys(i_from_table)
    where fkey_name = i_fkey_name and from_table = i_from_table;

    if not found then
        return 0;
    end if;

    -- remember the definition so it can be restored later
    insert into londiste.pending_fkeys values (fkey.from_table, fkey.to_table, i_fkey_name, fkey.fkey_def);

    execute 'alter table only ' || londiste.quote_fqname(fkey.from_table)
            || ' drop constraint ' || quote_ident(i_fkey_name);

    return 1;
end;
$$ language plpgsql strict;


create or replace function londiste.restore_table_fkey(i_from_table text, i_fkey_name text)
returns integer as $$
-- ----------------------------------------------------------------------
-- Function: londiste.restore_table_fkey(2)
--
--      Restore dropped fkey.
--
-- Parameters:
--      i_from_table - source table
--      i_fkey_name  - fkey name
--
-- Returns:
--      1 when the fkey was restored, 0 when it was not pending
-- ----------------------------------------------------------------------
declare
    fkey record;
begin
    select * into fkey
    from londiste.pending_fkeys
    where fkey_name = i_fkey_name and from_table = i_from_table;

    if not found then
        return 0;
    end if;

    -- re-create the constraint from the saved definition
    execute fkey.fkey_def;

    delete from londiste.pending_fkeys where fkey_name = fkey.fkey_name;

    return 1;
end;
$$ language plpgsql strict;

-- Group: Execute handling

create or replace function londiste.execute_start(
    in i_queue_name     text,
    in i_file_name      text,
    in i_sql            text,
    in i_expect_root    boolean,
    in i_attrs          text,
    out ret_code        int4,
    out ret_note        text)
as $$
-- ----------------------------------------------------------------------
-- Function: londiste.execute_start(5)
--
--      Start execution of DDL.  Should be called at the
--      start of the transaction that does the SQL execution.
--
-- Called-by:
--      Londiste setup tool on root, replay on branches/leafs.
--
-- Parameters:
--      i_queue_name    - cascaded queue name
--      i_file_name     - Unique ID for SQL
--      i_sql           - Actual script (informative, not used here)
--      i_expect_root   - Is this on root?  Setup tool sets this to avoid
--                        execution on branches.
--      i_attrs         - urlencoded dict of extra attributes.
--                        The value will be put into ev_extra2
--                        field of outgoing event.
--
-- Returns:
--      200 - Proceed.
--      201 - Already applied
--      401 - Not root.
--      404 - No such queue
-- ----------------------------------------------------------------------
declare
    is_root boolean;
begin
    is_root := pgq_node.is_root_node(i_queue_name);
    if i_expect_root then
        if not is_root then
            select 401, 'Node is not root node: ' || i_queue_name
                into ret_code, ret_note;
            return;
        end if;
    end if;

    perform 1 from londiste.applied_execute
        where execute_file = i_file_name;
    if found then
        select 201, 'EXECUTE: "' || i_file_name || '" already applied, skipping'
            into ret_code, ret_note;
        return;
    end if;

    -- this also locks against potential parallel execute
    insert into londiste.applied_execute (queue_name, execute_file, execute_sql, execute_attrs)
        values (i_queue_name, i_file_name, i_sql, i_attrs);

    select 200, 'Executing: ' || i_file_name into ret_code, ret_note;
    return;
end;
$$ language plpgsql;

create or replace function londiste.execute_start(
    in i_queue_name     text,
    in i_file_name      text,
    in i_sql            text,
    in i_expect_root    boolean,
    out ret_code        int4,
    out ret_note        text)
as $$
-- ----------------------------------------------------------------------
-- Function: londiste.execute_start(4)
--
--      Start execution of DDL.  Should be called at the
--      start of the transaction that does the SQL execution.
--      Wrapper around execute_start(5) with i_attrs = NULL.
--
-- Called-by:
--      Londiste setup tool on root, replay on branches/leafs.
--
-- Parameters:
--      i_queue_name    - cascaded queue name
--      i_file_name     - Unique ID for SQL
--      i_sql           - Actual script (informative, not used here)
--      i_expect_root   - Is this on root?  Setup tool sets this to avoid
--                        execution on branches.
--
-- Returns:
--      200 - Proceed.
--      201 - Already applied
--      401 - Not root.
--      404 - No such queue
-- ----------------------------------------------------------------------
begin
    select f.ret_code, f.ret_note
        from londiste.execute_start(i_queue_name, i_file_name, i_sql, i_expect_root, null) f
        into ret_code, ret_note;
    return;
end;
$$ language plpgsql;

create or replace function londiste.execute_finish(
    in i_queue_name     text,
    in i_file_name      text,
    out ret_code        int4,
    out ret_note        text)
as $$
-- ----------------------------------------------------------------------
-- Function: londiste.execute_finish(2)
--
--      Finish execution of DDL.  Should be called at the
--      end of the transaction that does the SQL execution.
--
-- Called-by:
--      Londiste setup tool on root, replay on branches/leafs.
--
-- Returns:
--      200 - Proceed.
--      404 - Current entry not found, execute_start() was not called?
-- ----------------------------------------------------------------------
declare
    is_root boolean;
    sql text;
    attrs text;
begin
    is_root := pgq_node.is_root_node(i_queue_name);

    select execute_sql, execute_attrs
        into sql, attrs
        from londiste.applied_execute
        where execute_file = i_file_name;
    if not found then
        select 404, 'execute_file called without execute_start'
            into ret_code, ret_note;
        return;
    end if;

    if is_root then
        -- publish the script downstream; subscribers replay it via
        -- the EXECUTE event (attrs travel in ev_extra2)
        perform pgq.insert_event(i_queue_name, 'EXECUTE', sql, i_file_name, attrs, null, null);
    end if;

    select 200, 'Execute finished: ' || i_file_name into ret_code, ret_note;
    return;
end;
$$ language plpgsql strict;

-- Group: Internal functions

create or replace function londiste.root_check_seqs(
    in i_queue_name text, in i_buffer int8,
    out ret_code int4, out ret_note text)
as $$
-- ----------------------------------------------------------------------
-- Function: londiste.root_check_seqs(2)
--
--      Check sequences, and publish values if needed.
--
-- Parameters:
--      i_queue_name - set name
--      i_buffer     - safety room
--
-- Returns:
--      100 - Sequences updated
--      402 - Not a root node
--      404 - Queue not found
-- ----------------------------------------------------------------------
declare
    n record;
    seq record;
    real_value int8;
    pub_value int8;
    real_buffer int8;
begin
    -- enforce a sane minimum safety margin
    if i_buffer is null or i_buffer < 10 then
        real_buffer := 10000;
    else
        real_buffer := i_buffer;
    end if;

    select node_type, node_name into n
        from pgq_node.node_info
        where queue_name = i_queue_name
        for update;
    if not found then
        select 404, 'Queue not found: ' || i_queue_name into ret_code, ret_note;
        return;
    end if;
    if n.node_type <> 'root' then
        select 402, 'Not a root node' into ret_code, ret_note;
        return;
    end if;

    for seq in
        select seq_name, last_value,
               londiste.quote_fqname(seq_name) as fqname
            from londiste.seq_info
            where queue_name = i_queue_name
                and local
            order by nr
    loop
        execute 'select last_value from ' || seq.fqname into real_value;
        -- publish only when the real value gets close to the last
        -- published one; publish with 3x margin to keep events rare
        if real_value + real_buffer >= seq.last_value then
            pub_value := real_value + real_buffer * 3;
            perform pgq.insert_event(i_queue_name, 'londiste.update-seq',
                        pub_value::text, seq.seq_name, null, null, null);
            update londiste.seq_info set last_value = pub_value
                where queue_name = i_queue_name
                    and seq_name = seq.seq_name;
        end if;
    end loop;

    select 100, 'Sequences updated' into ret_code, ret_note;
    return;
end;
$$ language plpgsql;

create or replace function londiste.root_check_seqs(
    in i_queue_name text,
    out ret_code int4, out ret_note text)
as $$
-- ----------------------------------------------------------------------
-- Function: londiste.root_check_seqs(1)
--
--      Wrapper around root_check_seqs(2) with default buffer of 10000.
-- ----------------------------------------------------------------------
begin
    select f.ret_code, f.ret_note
        into ret_code, ret_note
        from londiste.root_check_seqs(i_queue_name, 10000) f;
    return;
end;
$$ language plpgsql;

create or replace function londiste.root_notify_change(i_queue_name text, i_ev_type text, i_ev_data text)
returns integer as $$
-- ----------------------------------------------------------------------
-- Function: londiste.root_notify_change(3)
--
--      Send event about change in root downstream.
--      Always returns 1; raises an exception on non-root nodes.
-- ----------------------------------------------------------------------
begin
    if not coalesce(pgq_node.is_root_node(i_queue_name), false) then
        raise exception 'only root node can send events';
    end if;
    perform pgq.insert_event(i_queue_name, i_ev_type, i_ev_data);

    return 1;
end;
$$ language plpgsql;

create or replace function londiste.local_set_table_state(
    in i_queue_name text,
    in i_table_name text,
    in i_snapshot text,
    in i_merge_state text,
    out ret_code int4,
    out ret_note text)
as $$
-- ----------------------------------------------------------------------
-- Function: londiste.local_set_table_state(4)
--
--      Change table state.
--
-- Parameters:
--      i_queue_name    - cascaded queue name
--      i_table_name    - table name
--      i_snapshot      - optional remote snapshot info
--      i_merge_state   - merge state
--
-- Returns:
--      200 - OK
--      404 - No such local table
-- ----------------------------------------------------------------------
declare
    _tbl text;
begin
    _tbl = londiste.make_fqname(i_table_name);

    update londiste.table_info
        set custom_snapshot = i_snapshot,
            merge_state = i_merge_state
      where queue_name = i_queue_name
        and table_name = _tbl
        and local;
    if not found then
        select 404, 'No such table: ' || _tbl
            into ret_code, ret_note;
        return;
    end if;

    select 200, 'Table ' || _tbl || ' state set to '
            || coalesce(quote_literal(i_merge_state), 'NULL')
        into ret_code, ret_note;
    return;
end;
$$ language plpgsql;

create or replace function londiste.local_set_table_attrs(
    in i_queue_name text,
    in i_table_name text,
    in i_table_attrs text,
    out ret_code int4,
    out ret_note text)
as $$
-- ----------------------------------------------------------------------
-- Function: londiste.local_set_table_attrs(3)
--
--      Store urlencoded table attributes.
-- -- Parameters: -- i_queue_name - cascaded queue name -- i_table - table name -- i_table_attrs - urlencoded attributes -- ---------------------------------------------------------------------- begin update londiste.table_info set table_attrs = i_table_attrs where queue_name = i_queue_name and table_name = i_table_name and local; if found then select 200, i_table_name || ': Table attributes stored' into ret_code, ret_note; else select 404, 'no such local table: ' || i_table_name into ret_code, ret_note; end if; return; end; $$ language plpgsql; create or replace function londiste.local_set_table_struct( in i_queue_name text, in i_table_name text, in i_dropped_ddl text, out ret_code int4, out ret_note text) as $$ -- ---------------------------------------------------------------------- -- Function: londiste.local_set_table_struct(3) -- -- Store dropped table struct temporarily. -- -- Parameters: -- i_queue_name - cascaded queue name -- i_table - table name -- i_dropped_ddl - merge state -- ---------------------------------------------------------------------- begin update londiste.table_info set dropped_ddl = i_dropped_ddl where queue_name = i_queue_name and table_name = i_table_name and local; if found then select 200, 'Table struct stored' into ret_code, ret_note; else select 404, 'no such local table: '||i_table_name into ret_code, ret_note; end if; return; end; $$ language plpgsql; create or replace function londiste.periodic_maintenance() returns integer as $$ -- ---------------------------------------------------------------------- -- Function: londiste.periodic_maintenance(0) -- -- Clean random stuff. 
-- ---------------------------------------------------------------------- begin -- clean old EXECUTE entries delete from londiste.applied_execute where execute_time < now() - '3 months'::interval; return 0; end; $$ language plpgsql; -- need admin access -- Group: Utility functions create or replace function londiste.find_column_types(tbl text) returns text as $$ -- ---------------------------------------------------------------------- -- Function: londiste.find_column_types(1) -- -- Returns columnt type string for logtriga(). -- -- Parameters: -- tbl - fqname -- -- Returns: -- String of 'kv'. -- ---------------------------------------------------------------------- declare res text; col record; tbl_oid oid; begin tbl_oid := londiste.find_table_oid(tbl); res := ''; for col in SELECT CASE WHEN k.attname IS NOT NULL THEN 'k' ELSE 'v' END AS type FROM pg_attribute a LEFT JOIN ( SELECT k.attname FROM pg_index i, pg_attribute k WHERE i.indrelid = tbl_oid AND k.attrelid = i.indexrelid AND i.indisprimary AND k.attnum > 0 AND NOT k.attisdropped ) k ON (k.attname = a.attname) WHERE a.attrelid = tbl_oid AND a.attnum > 0 AND NOT a.attisdropped ORDER BY a.attnum loop res := res || col.type; end loop; return res; end; $$ language plpgsql strict stable; create or replace function londiste.find_table_fkeys(i_table_name text) returns setof londiste.pending_fkeys as $$ -- ---------------------------------------------------------------------- -- Function: londiste.find_table_fkeys(1) -- -- Return all active fkeys. -- -- Parameters: -- i_table_name - fqname -- -- Returns: -- from_table - fqname -- to_table - fqname -- fkey_name - name -- fkey_def - full def -- ---------------------------------------------------------------------- declare fkey record; tbl_oid oid; begin select londiste.find_table_oid(i_table_name) into tbl_oid; for fkey in select n1.nspname || '.' || t1.relname as from_table, n2.nspname || '.' 
|| t2.relname as to_table, conname::text as fkey_name, 'alter table only ' || quote_ident(n1.nspname) || '.' || quote_ident(t1.relname) || ' add constraint ' || quote_ident(conname::text) || ' ' || pg_get_constraintdef(c.oid) as fkey_def from pg_constraint c, pg_namespace n1, pg_class t1, pg_namespace n2, pg_class t2 where c.contype = 'f' and (c.conrelid = tbl_oid or c.confrelid = tbl_oid) and t1.oid = c.conrelid and n1.oid = t1.relnamespace and t2.oid = c.confrelid and n2.oid = t2.relnamespace order by 1,2,3 loop return next fkey; end loop; return; end; $$ language plpgsql strict stable; drop function if exists londiste.find_seq_oid(text); drop function if exists londiste.find_table_oid(text); drop function if exists londiste.find_rel_oid(text, text); create or replace function londiste.find_rel_oid(i_fqname text, i_kind text) returns oid as $$ -- ---------------------------------------------------------------------- -- Function: londiste.find_rel_oid(2) -- -- Find pg_class row oid. -- -- Parameters: -- i_fqname - fq object name -- i_kind - relkind value -- -- Returns: -- oid or exception of not found -- ---------------------------------------------------------------------- declare res oid; pos integer; schema text; name text; begin pos := position('.' in i_fqname); if pos > 0 then schema := substring(i_fqname for pos - 1); name := substring(i_fqname from pos + 1); else schema := 'public'; name := i_fqname; end if; select c.oid into res from pg_namespace n, pg_class c where c.relnamespace = n.oid and c.relkind = i_kind and n.nspname = schema and c.relname = name; if not found then res := NULL; end if; return res; end; $$ language plpgsql strict stable; create or replace function londiste.find_table_oid(tbl text) returns oid as $$ -- ---------------------------------------------------------------------- -- Function: londiste.find_table_oid(1) -- -- Find table oid based on fqname. 
-- -- Parameters: -- tbl - fqname -- -- Returns: -- oid -- ---------------------------------------------------------------------- begin return londiste.find_rel_oid(tbl, 'r'); end; $$ language plpgsql strict stable; create or replace function londiste.find_seq_oid(seq text) returns oid as $$ -- ---------------------------------------------------------------------- -- Function: londiste.find_seq_oid(1) -- -- Find sequence oid based on fqname. -- -- Parameters: -- seq - fqname -- -- Returns: -- oid -- ---------------------------------------------------------------------- begin return londiste.find_rel_oid(seq, 'S'); end; $$ language plpgsql strict stable; create or replace function londiste.quote_fqname(i_name text) returns text as $$ -- ---------------------------------------------------------------------- -- Function: londiste.quote_fqname(1) -- -- Quete fully-qualified object name for SQL. -- -- First dot is taken as schema separator. -- -- If schema is missing, 'public' is assumed. -- -- Parameters: -- i_name - fully qualified object name. -- -- Returns: -- Quoted name. -- ---------------------------------------------------------------------- declare res text; pos integer; s text; n text; begin pos := position('.' in i_name); if pos > 0 then s := substring(i_name for pos - 1); n := substring(i_name from pos + 1); else s := 'public'; n := i_name; end if; return quote_ident(s) || '.' || quote_ident(n); end; $$ language plpgsql strict immutable; create or replace function londiste.make_fqname(i_name text) returns text as $$ -- ---------------------------------------------------------------------- -- Function: londiste.make_fqname(1) -- -- Make name to schema-qualified one. -- -- First dot is taken as schema separator. -- -- If schema is missing, 'public' is assumed. -- -- Parameters: -- i_name - object name. -- -- Returns: -- Schema qualified name. -- ---------------------------------------------------------------------- begin if position('.' 
in i_name) > 0 then return i_name; else return 'public.' || i_name; end if; end; $$ language plpgsql strict immutable; create or replace function londiste.split_fqname( in i_fqname text, out schema_part text, out name_part text) as $$ -- ---------------------------------------------------------------------- -- Function: londiste.split_fqname(1) -- -- Split fqname to schema and name parts. -- -- First dot is taken as schema separator. -- -- If schema is missing, 'public' is assumed. -- -- Parameters: -- i_fqname - object name. -- ---------------------------------------------------------------------- declare dot integer; begin dot = position('.' in i_fqname); if dot > 0 then schema_part = substring(i_fqname for dot - 1); name_part = substring(i_fqname from dot + 1); else schema_part = 'public'; name_part = i_fqname; end if; return; end; $$ language plpgsql strict immutable; create or replace function londiste.table_info_trigger() returns trigger as $$ -- ---------------------------------------------------------------------- -- Function: londiste.table_info_trigger(0) -- -- Trigger on londiste.table_info. Cleans triggers from tables -- when table is removed from londiste.table_info. -- ---------------------------------------------------------------------- begin if TG_OP = 'DELETE' then perform londiste.drop_table_triggers(OLD.queue_name, OLD.table_name); end if; return null; end; $$ language plpgsql; create or replace function londiste.drop_table_triggers( in i_queue_name text, in i_table_name text) returns void as $$ -- ---------------------------------------------------------------------- -- Function: londiste.drop_table_triggers(2) -- -- Remove Londiste triggers from table. 
-- -- Parameters: -- i_queue_name - set name -- i_table_name - table name -- -- Returns: -- 200 - OK -- 404 - Table not found -- ---------------------------------------------------------------------- declare logtrg_name text; b_queue_name bytea; _dest_table text; begin select coalesce(dest_table, table_name) from londiste.table_info t where t.queue_name = i_queue_name and t.table_name = i_table_name into _dest_table; if not found then return; end if; -- skip if no triggers found on that table perform 1 from pg_catalog.pg_trigger where tgrelid = londiste.find_table_oid(_dest_table); if not found then return; end if; -- cast to bytea b_queue_name := decode(replace(i_queue_name, E'\\', E'\\\\'), 'escape'); -- drop all replication triggers that target our queue. -- by checking trigger func and queue name there is not -- dependency on naming standard or side-storage. for logtrg_name in select tgname from pg_catalog.pg_trigger where tgrelid = londiste.find_table_oid(_dest_table) and londiste.is_replica_func(tgfoid) and octet_length(tgargs) > 0 and substring(tgargs for (position(E'\\000'::bytea in tgargs) - 1)) = b_queue_name loop execute 'drop trigger ' || quote_ident(logtrg_name) || ' on ' || londiste.quote_fqname(_dest_table); end loop; end; $$ language plpgsql strict; create or replace function londiste.is_replica_func(func_oid oid) returns boolean as $$ -- ---------------------------------------------------------------------- -- Function: londiste.is_replica_func(1) -- -- Returns true if function is a PgQ-based replication functions. -- This also means it takes queue name as first argument. 
-- ---------------------------------------------------------------------- select count(1) > 0 from pg_proc f join pg_namespace n on (n.oid = f.pronamespace) where f.oid = $1 and n.nspname = 'pgq' and f.proname in ('sqltriga', 'logutriga'); $$ language sql strict stable; create or replace function londiste.version() returns text as $$ -- ---------------------------------------------------------------------- -- Function: londiste.version(0) -- -- Returns version string for londiste. ATM it is based on SkyTools version -- and only bumped when database code changes. -- ---------------------------------------------------------------------- begin return '3.1.1'; end; $$ language plpgsql; -- Group: Utility functions for handlers create or replace function londiste.create_partition( i_table text, i_part text, i_pkeys text, i_part_field text, i_part_time timestamptz, i_part_period text ) returns int as $$ ------------------------------------------------------------------------ -- Function: public.create_partition -- -- Creates inherited child table if it does not exist by copying parent table's structure. -- Locks parent table to avoid parallel creation. -- -- Elements that are copied over by "LIKE x INCLUDING ALL": -- * Defaults -- * Constraints -- * Indexes -- * Storage options (9.0+) -- * Comments (9.0+) -- -- Elements that are copied over manually because LIKE ALL does not support them: -- * Grants -- * Triggers -- * Rules -- -- Parameters: -- i_table - name of parent table -- i_part - name of partition table to create -- i_pkeys - primary key fields (comma separated, used to create constraint). 
-- i_part_field - field used to partition table (when not partitioned by field, value is NULL) -- i_part_time - partition time -- i_part_period - period of partitioned data, current possible values are 'hour', 'day', 'month' and 'year' -- -- Example: -- select londiste.create_partition('aggregate.user_call_monthly', 'aggregate.user_call_monthly_2010_01', 'key_user', 'period_start', '2010-01-10 11:00'::timestamptz, 'month'); -- ------------------------------------------------------------------------ declare chk_start text; chk_end text; part_start timestamptz; part_end timestamptz; parent_schema text; parent_name text; parent_oid oid; part_schema text; part_name text; pos int4; fq_table text; fq_part text; q_grantee text; g record; r record; tg record; sql text; pgver integer; r_oldtbl text; r_extra text; r_sql text; begin if i_table is null or i_part is null then raise exception 'need table and part'; end if; -- load postgres version (XYYZZ). show server_version_num into pgver; -- parent table schema and name + quoted name pos := position('.' in i_table); if pos > 0 then parent_schema := substring(i_table for pos - 1); parent_name := substring(i_table from pos + 1); else parent_schema := 'public'; parent_name := i_table; end if; fq_table := quote_ident(parent_schema) || '.' || quote_ident(parent_name); -- part table schema and name + quoted name pos := position('.' in i_part); if pos > 0 then part_schema := substring(i_part for pos - 1); part_name := substring(i_part from pos + 1); else part_schema := 'public'; part_name := i_part; end if; fq_part := quote_ident(part_schema) || '.' 
|| quote_ident(part_name); -- allow only single creation at a time, without affecting DML operations execute 'lock table ' || fq_table || ' in share update exclusive mode'; parent_oid := fq_table::regclass::oid; -- check if part table exists perform 1 from pg_class t, pg_namespace s where t.relnamespace = s.oid and s.nspname = part_schema and t.relname = part_name; if found then return 0; end if; -- need to use 'like' to get indexes sql := 'create table ' || fq_part || ' (like ' || fq_table; if pgver >= 90000 then sql := sql || ' including all'; else sql := sql || ' including indexes including constraints including defaults'; end if; sql := sql || ') inherits (' || fq_table || ')'; execute sql; -- extra check constraint if i_part_field != '' then part_start := date_trunc(i_part_period, i_part_time); part_end := part_start + ('1 ' || i_part_period)::interval; chk_start := quote_literal(to_char(part_start, 'YYYY-MM-DD HH24:MI:SS')); chk_end := quote_literal(to_char(part_end, 'YYYY-MM-DD HH24:MI:SS')); sql := 'alter table '|| fq_part || ' add check (' || quote_ident(i_part_field) || ' >= ' || chk_start || ' and ' || quote_ident(i_part_field) || ' < ' || chk_end || ')'; execute sql; end if; -- load grants from parent table for g in select grantor, grantee, privilege_type, is_grantable from information_schema.table_privileges where table_schema = parent_schema and table_name = parent_name loop if g.grantee = 'PUBLIC' then q_grantee = 'public'; else q_grantee := quote_ident(g.grantee); end if; sql := 'grant ' || g.privilege_type || ' on ' || fq_part || ' to ' || q_grantee; if g.is_grantable = 'YES' then sql := sql || ' with grant option'; end if; execute sql; end loop; -- generate triggers info query sql := 'SELECT tgname, tgenabled,' || ' pg_catalog.pg_get_triggerdef(oid) as tgdef' || ' FROM pg_catalog.pg_trigger ' || ' WHERE tgrelid = ' || parent_oid::text || ' AND '; if pgver >= 90000 then sql := sql || ' NOT tgisinternal'; else sql := sql || ' NOT tgisconstraint'; 
end if; -- copy triggers for tg in execute sql loop sql := regexp_replace(tg.tgdef, E' ON ([[:alnum:]_.]+|"([^"]|"")+")+ ', ' ON ' || fq_part || ' '); if sql = tg.tgdef then raise exception 'Failed to reconstruct the trigger: %', sql; end if; execute sql; if tg.tgenabled = 'O' then -- standard mode r_extra := NULL; elsif tg.tgenabled = 'D' then r_extra := ' DISABLE TRIGGER '; elsif tg.tgenabled = 'A' then r_extra := ' ENABLE ALWAYS TRIGGER '; elsif tg.tgenabled = 'R' then r_extra := ' ENABLE REPLICA TRIGGER '; else raise exception 'Unknown trigger mode: %', tg.tgenabled; end if; if r_extra is not null then sql := 'ALTER TABLE ' || fq_part || r_extra || quote_ident(tg.tgname); execute sql; end if; end loop; -- copy rules for r in select rw.rulename, rw.ev_enabled, pg_get_ruledef(rw.oid) as definition from pg_catalog.pg_rewrite rw where rw.ev_class = parent_oid and rw.rulename <> '_RETURN'::name loop -- try to skip rule name r_extra := 'CREATE RULE ' || quote_ident(r.rulename) || ' AS'; r_sql := substr(r.definition, 1, char_length(r_extra)); if r_sql = r_extra then r_sql := substr(r.definition, char_length(r_extra)); else raise exception 'failed to match rule name'; end if; -- no clue what name was used in defn, so find it from sql r_oldtbl := substring(r_sql from ' TO (([[:alnum:]_.]+|"([^"]+|"")+")+)[[:space:]]'); if char_length(r_oldtbl) > 0 then sql := replace(r.definition, r_oldtbl, fq_part); else raise exception 'failed to find original table name'; end if; execute sql; -- rule flags r_extra := NULL; if r.ev_enabled = 'R' then r_extra = ' ENABLE REPLICA RULE '; elsif r.ev_enabled = 'A' then r_extra = ' ENABLE ALWAYS RULE '; elsif r.ev_enabled = 'D' then r_extra = ' DISABLE RULE '; elsif r.ev_enabled <> 'O' then raise exception 'unknown rule option: %', r.ev_enabled; end if; if r_extra is not null then sql := 'ALTER TABLE ' || fq_part || r_extra || quote_ident(r.rulename); end if; end loop; return 1; end; $$ language plpgsql; create trigger 
table_info_trigger_sync after delete on londiste.table_info for each row execute procedure londiste.table_info_trigger(); grant usage on schema londiste to public; grant select on londiste.table_info to public; grant select on londiste.seq_info to public; grant select on londiste.pending_fkeys to public; grant select on londiste.applied_execute to public; skytools-3.2.6/upgrade/final/v2.1.6_pgq_ext.sql0000644000000000000000000000021112426435645016126 0ustar begin; create or replace function pgq_ext.version() returns text as $$ begin return '2.1.6'; end; $$ language plpgsql; end; skytools-3.2.6/upgrade/final/v2.1.5_londiste.sql0000644000000000000000000003620212426435645016310 0ustar begin; create table londiste.subscriber_pending_fkeys( from_table text not null, to_table text not null, fkey_name text not null, fkey_def text not null, primary key (from_table, fkey_name) ); create table londiste.subscriber_pending_triggers ( table_name text not null, trigger_name text not null, trigger_def text not null, primary key (table_name, trigger_name) ); -- drop function londiste.denytrigger(); create or replace function londiste.find_table_fkeys(i_table_name text) returns setof londiste.subscriber_pending_fkeys as $$ declare fkey record; tbl_oid oid; begin select londiste.find_table_oid(i_table_name) into tbl_oid; for fkey in select n1.nspname || '.' || t1.relname as from_table, n2.nspname || '.' || t2.relname as to_table, conname::text as fkey_name, 'alter table only ' || quote_ident(n1.nspname) || '.' 
|| quote_ident(t1.relname) || ' add constraint ' || quote_ident(conname::text) || ' ' || pg_get_constraintdef(c.oid) as fkey_def from pg_constraint c, pg_namespace n1, pg_class t1, pg_namespace n2, pg_class t2 where c.contype = 'f' and (c.conrelid = tbl_oid or c.confrelid = tbl_oid) and t1.oid = c.conrelid and n1.oid = t1.relnamespace and t2.oid = c.confrelid and n2.oid = t2.relnamespace order by 1,2,3 loop return next fkey; end loop; return; end; $$ language plpgsql strict stable; create or replace function londiste.find_table_triggers(i_table_name text) returns setof londiste.subscriber_pending_triggers as $$ declare tg record; begin for tg in select n.nspname || '.' || c.relname as table_name, t.tgname::text as name, pg_get_triggerdef(t.oid) as def from pg_trigger t, pg_class c, pg_namespace n where n.oid = c.relnamespace and c.oid = t.tgrelid and t.tgrelid = londiste.find_table_oid(i_table_name) and not t.tgisconstraint loop return next tg; end loop; return; end; $$ language plpgsql strict stable; create or replace function londiste.find_column_types(tbl text) returns text as $$ declare res text; col record; tbl_oid oid; begin tbl_oid := londiste.find_table_oid(tbl); res := ''; for col in SELECT CASE WHEN k.attname IS NOT NULL THEN 'k' ELSE 'v' END AS type FROM pg_attribute a LEFT JOIN ( SELECT k.attname FROM pg_index i, pg_attribute k WHERE i.indrelid = tbl_oid AND k.attrelid = i.indexrelid AND i.indisprimary AND k.attnum > 0 AND NOT k.attisdropped ) k ON (k.attname = a.attname) WHERE a.attrelid = tbl_oid AND a.attnum > 0 AND NOT a.attisdropped ORDER BY a.attnum loop res := res || col.type; end loop; return res; end; $$ language plpgsql strict stable; create or replace function londiste.subscriber_get_table_pending_fkeys(i_table_name text) returns setof londiste.subscriber_pending_fkeys as $$ declare fkeys record; begin for fkeys in select * from londiste.subscriber_pending_fkeys where from_table=i_table_name or to_table=i_table_name order by 1,2,3 loop return 
next fkeys; end loop; return; end; $$ language plpgsql; create or replace function londiste.subscriber_get_queue_valid_pending_fkeys(i_queue_name text) returns setof londiste.subscriber_pending_fkeys as $$ declare fkeys record; begin for fkeys in select pf.* from londiste.subscriber_pending_fkeys pf left join londiste.subscriber_table st_from on (st_from.table_name = pf.from_table) left join londiste.subscriber_table st_to on (st_to.table_name = pf.to_table) where (st_from.table_name is null or (st_from.merge_state = 'ok' and st_from.snapshot is null)) and (st_to.table_name is null or (st_to.merge_state = 'ok' and st_to.snapshot is null)) and (coalesce(st_from.queue_name = i_queue_name, false) or coalesce(st_to.queue_name = i_queue_name, false)) order by 1, 2, 3 loop return next fkeys; end loop; return; end; $$ language plpgsql; create or replace function londiste.subscriber_drop_table_fkey(i_from_table text, i_fkey_name text) returns integer as $$ declare fkey record; begin select * into fkey from londiste.find_table_fkeys(i_from_table) where fkey_name = i_fkey_name and from_table = i_from_table; if not found then return 0; end if; insert into londiste.subscriber_pending_fkeys values (fkey.from_table, fkey.to_table, i_fkey_name, fkey.fkey_def); execute 'alter table only ' || londiste.quote_fqname(fkey.from_table) || ' drop constraint ' || quote_ident(i_fkey_name); return 1; end; $$ language plpgsql; create or replace function londiste.subscriber_restore_table_fkey(i_from_table text, i_fkey_name text) returns integer as $$ declare fkey record; begin select * into fkey from londiste.subscriber_pending_fkeys where fkey_name = i_fkey_name and from_table = i_from_table; if not found then return 0; end if; delete from londiste.subscriber_pending_fkeys where fkey_name = fkey.fkey_name; execute fkey.fkey_def; return 1; end; $$ language plpgsql; create or replace function londiste.subscriber_get_table_pending_triggers(i_table_name text) returns setof 
londiste.subscriber_pending_triggers as $$ declare trigger record; begin for trigger in select * from londiste.subscriber_pending_triggers where table_name = i_table_name loop return next trigger; end loop; return; end; $$ language plpgsql strict stable; create or replace function londiste.subscriber_drop_table_trigger(i_table_name text, i_trigger_name text) returns integer as $$ declare trig_def record; begin select * into trig_def from londiste.find_table_triggers(i_table_name) where trigger_name = i_trigger_name; if FOUND is not true then return 0; end if; insert into londiste.subscriber_pending_triggers(table_name, trigger_name, trigger_def) values (i_table_name, i_trigger_name, trig_def.trigger_def); execute 'drop trigger ' || i_trigger_name || ' on ' || i_table_name; return 1; end; $$ language plpgsql; create or replace function londiste.subscriber_drop_all_table_triggers(i_table_name text) returns integer as $$ declare trigger record; begin for trigger in select trigger_name as name from londiste.find_table_triggers(i_table_name) loop perform londiste.subscriber_drop_table_trigger(i_table_name, trigger.name); end loop; return 1; end; $$ language plpgsql; create or replace function londiste.subscriber_restore_table_trigger(i_table_name text, i_trigger_name text) returns integer as $$ declare trig_def text; begin select trigger_def into trig_def from londiste.subscriber_pending_triggers where (table_name, trigger_name) = (i_table_name, i_trigger_name); if not found then return 0; end if; delete from londiste.subscriber_pending_triggers where table_name = i_table_name and trigger_name = i_trigger_name; execute trig_def; return 1; end; $$ language plpgsql; create or replace function londiste.subscriber_restore_all_table_triggers(i_table_name text) returns integer as $$ declare trigger record; begin for trigger in select trigger_name as name from londiste.subscriber_get_table_pending_triggers(i_table_name) loop perform 
londiste.subscriber_restore_table_trigger(i_table_name, trigger.name); end loop; return 1; end; $$ language plpgsql; create or replace function londiste.quote_fqname(i_name text) returns text as $$ declare res text; pos integer; s text; n text; begin pos := position('.' in i_name); if pos > 0 then s := substring(i_name for pos - 1); n := substring(i_name from pos + 1); else s := 'public'; n := i_name; end if; return quote_ident(s) || '.' || quote_ident(n); end; $$ language plpgsql strict immutable; create or replace function londiste.find_rel_oid(tbl text, kind text) returns oid as $$ declare res oid; pos integer; schema text; name text; begin pos := position('.' in tbl); if pos > 0 then schema := substring(tbl for pos - 1); name := substring(tbl from pos + 1); else schema := 'public'; name := tbl; end if; select c.oid into res from pg_namespace n, pg_class c where c.relnamespace = n.oid and c.relkind = kind and n.nspname = schema and c.relname = name; if not found then if kind = 'r' then raise exception 'table not found'; elsif kind = 'S' then raise exception 'seq not found'; else raise exception 'weird relkind'; end if; end if; return res; end; $$ language plpgsql strict stable; create or replace function londiste.find_table_oid(tbl text) returns oid as $$ begin return londiste.find_rel_oid(tbl, 'r'); end; $$ language plpgsql strict stable; create or replace function londiste.find_seq_oid(tbl text) returns oid as $$ begin return londiste.find_rel_oid(tbl, 'S'); end; $$ language plpgsql strict stable; create or replace function londiste.get_last_tick(i_consumer text) returns bigint as $$ declare res bigint; begin select last_tick_id into res from londiste.completed where consumer_id = i_consumer; return res; end; $$ language plpgsql security definer strict stable; create or replace function londiste.provider_add_table( i_queue_name text, i_table_name text, i_col_types text ) returns integer strict as $$ declare tgname text; sql text; begin if 
londiste.link_source(i_queue_name) is not null then raise exception 'Linked queue, manipulation not allowed'; end if; if position('k' in i_col_types) < 1 then raise exception 'need key column'; end if; if position('.' in i_table_name) < 1 then raise exception 'need fully-qualified table name'; end if; select queue_name into tgname from pgq.queue where queue_name = i_queue_name; if not found then raise exception 'no such event queue'; end if; tgname := i_queue_name || '_logger'; tgname := replace(lower(tgname), '.', '_'); insert into londiste.provider_table (queue_name, table_name, trigger_name) values (i_queue_name, i_table_name, tgname); perform londiste.provider_create_trigger( i_queue_name, i_table_name, i_col_types); return 1; end; $$ language plpgsql security definer; create or replace function londiste.provider_add_table( i_queue_name text, i_table_name text ) returns integer as $$ begin return londiste.provider_add_table(i_queue_name, i_table_name, londiste.find_column_types(i_table_name)); end; $$ language plpgsql security definer; create or replace function londiste.provider_create_trigger( i_queue_name text, i_table_name text, i_col_types text ) returns integer strict as $$ declare tgname text; begin select trigger_name into tgname from londiste.provider_table where queue_name = i_queue_name and table_name = i_table_name; if not found then raise exception 'table not found'; end if; execute 'create trigger ' || tgname || ' after insert or update or delete on ' || i_table_name || ' for each row execute procedure pgq.logtriga(' || quote_literal(i_queue_name) || ', ' || quote_literal(i_col_types) || ', ' || quote_literal(i_table_name) || ')'; return 1; end; $$ language plpgsql security definer; create or replace function londiste.provider_notify_change(i_queue_name text) returns integer as $$ declare res text; tbl record; begin res := ''; for tbl in select table_name from londiste.provider_table where queue_name = i_queue_name order by nr loop if res = '' 
then res := tbl.table_name; else res := res || ',' || tbl.table_name; end if; end loop; perform pgq.insert_event(i_queue_name, 'T', res); return 1; end; $$ language plpgsql security definer; create or replace function londiste.provider_remove_table( i_queue_name text, i_table_name text ) returns integer as $$ declare tgname text; begin if londiste.link_source(i_queue_name) is not null then raise exception 'Linked queue, manipulation not allowed'; end if; select trigger_name into tgname from londiste.provider_table where queue_name = i_queue_name and table_name = i_table_name; if not found then raise exception 'no such table registered'; end if; begin execute 'drop trigger ' || tgname || ' on ' || i_table_name; exception when undefined_table then raise notice 'table % does not exist', i_table_name; when undefined_object then raise notice 'trigger % does not exist on table %', tgname, i_table_name; end; delete from londiste.provider_table where queue_name = i_queue_name and table_name = i_table_name; return 1; end; $$ language plpgsql security definer; create or replace function londiste.set_last_tick( i_consumer text, i_tick_id bigint) returns integer as $$ begin if i_tick_id is null then delete from londiste.completed where consumer_id = i_consumer; else update londiste.completed set last_tick_id = i_tick_id where consumer_id = i_consumer; if not found then insert into londiste.completed (consumer_id, last_tick_id) values (i_consumer, i_tick_id); end if; end if; return 1; end; $$ language plpgsql security definer; create or replace function londiste.subscriber_remove_table( i_queue_name text, i_table text) returns integer as $$ declare link text; begin delete from londiste.subscriber_table where queue_name = i_queue_name and table_name = i_table; if not found then raise exception 'no such table'; end if; -- sync link link := londiste.link_dest(i_queue_name); if link is not null then delete from londiste.provider_table where queue_name = link and table_name = 
i_table; perform londiste.provider_notify_change(link); end if; return 0; end; $$ language plpgsql security definer; grant usage on schema londiste to public; grant select on londiste.provider_table to public; grant select on londiste.completed to public; grant select on londiste.link to public; grant select on londiste.subscriber_table to public; end; skytools-3.2.6/upgrade/src/0000755000000000000000000000000012426435645012530 5ustar skytools-3.2.6/upgrade/src/v2.1.5_pgq_core.sql0000644000000000000000000000124612426435645015764 0ustar begin; alter table pgq.subscription add constraint subscription_ukey unique (sub_queue, sub_consumer); create index rq_retry_owner_idx on pgq.retry_queue (ev_owner, ev_id); \i ../sql/pgq/functions/pgq.current_event_table.sql \i ../sql/pgq/functions/pgq.event_failed.sql \i ../sql/pgq/functions/pgq.event_retry.sql \i ../sql/pgq/functions/pgq.force_tick.sql \i ../sql/pgq/functions/pgq.grant_perms.sql \i ../sql/pgq/functions/pgq.insert_event.sql \i ../sql/pgq/functions/pgq.maint_tables_to_vacuum.sql \i ../sql/pgq/functions/pgq.next_batch.sql \i ../sql/pgq/functions/pgq.register_consumer.sql \i ../sql/pgq/functions/pgq.version.sql \i ../sql/pgq/structure/grants.sql end; skytools-3.2.6/upgrade/src/v2.1.5_pgq_ext.sql0000644000000000000000000000007212426435645015630 0ustar begin; \i ../sql/pgq_ext/functions/track_tick.sql end; skytools-3.2.6/upgrade/src/v2.1.6_londiste.sql0000644000000000000000000000010112426435645015774 0ustar begin; \i ../sql/londiste/functions/londiste.version.sql end; skytools-3.2.6/upgrade/src/londiste.2to3.sql0000644000000000000000000000007312426435645015660 0ustar drop function if exists londiste.find_table_fkeys(text); skytools-3.2.6/upgrade/src/v3.0_pgq_core.sql0000644000000000000000000000500112426435645015612 0ustar begin; -- new fields to pgq.queue alter table pgq.queue add column queue_disable_insert boolean; alter table pgq.queue add column queue_ticker_paused boolean; alter table pgq.queue add column 
queue_per_tx_limit int4; update pgq.queue set queue_disable_insert=false, queue_ticker_paused=false; alter table pgq.queue alter column queue_disable_insert set not null; alter table pgq.queue alter column queue_disable_insert set default false; alter table pgq.queue alter column queue_ticker_paused set not null; alter table pgq.queue alter column queue_ticker_paused set default false; -- new field to pgq.tick alter table pgq.tick add column tick_event_seq int8; -- surgery on pgq.retry_queue alter table pgq.retry_queue add column ev_queue int4; update pgq.retry_queue set ev_queue = sub_queue from pgq.subscription where ev_owner = sub_id; alter table pgq.retry_queue alter column ev_queue set not null; drop index pgq.rq_retry_owner_idx; -- surgery on pgq.subscription alter table pgq.retry_queue drop constraint rq_owner_fkey; alter table pgq.failed_queue drop constraint fq_owner_fkey; alter table pgq.subscription drop constraint subscription_pkey; alter table pgq.subscription drop constraint subscription_ukey; alter table pgq.subscription add constraint subscription_pkey primary key (sub_queue, sub_consumer); alter table pgq.subscription add constraint subscription_batch_idx unique (sub_batch); alter table pgq.subscription alter column sub_last_tick drop not null; -- drop failed queue functionality. not mandatory, who wants can keep it. 
drop function pgq.failed_event_list(text, text); drop function pgq.failed_event_list(text, text, integer, integer); drop function pgq.failed_event_count(text, text); drop function pgq.failed_event_delete(text, text, bigint); drop function pgq.failed_event_retry(text, text, bigint); drop function pgq.event_failed(bigint, bigint, text); drop table pgq.failed_queue; -- drop obsolete functions drop function pgq.ticker(text, bigint); drop function pgq.register_consumer(text, text, int8); -- drop types and related functions drop function pgq.get_batch_events(bigint); drop function pgq.get_batch_info(bigint); drop function pgq.get_consumer_info(); drop function pgq.get_consumer_info(text); drop function pgq.get_consumer_info(text, text); drop function pgq.get_queue_info(); drop function pgq.get_queue_info(text); drop type pgq.ret_batch_event; drop type pgq.ret_batch_info; drop type pgq.ret_consumer_info; drop type pgq.ret_queue_info; -- update all functions \i ../sql/pgq/pgq.upgrade.sql end; skytools-3.2.6/upgrade/src/v2.1.6_pgq_ext.sql0000644000000000000000000000006712426435645015635 0ustar begin; \i ../sql/pgq_ext/functions/version.sql end; skytools-3.2.6/upgrade/src/v2.1.5_londiste.sql0000644000000000000000000000262412426435645016007 0ustar begin; create table londiste.subscriber_pending_fkeys( from_table text not null, to_table text not null, fkey_name text not null, fkey_def text not null, primary key (from_table, fkey_name) ); create table londiste.subscriber_pending_triggers ( table_name text not null, trigger_name text not null, trigger_def text not null, primary key (table_name, trigger_name) ); -- drop function londiste.denytrigger(); \i ../sql/londiste/functions/londiste.find_table_fkeys.sql \i ../sql/londiste/functions/londiste.find_table_triggers.sql \i ../sql/londiste/functions/londiste.find_column_types.sql \i ../sql/londiste/functions/londiste.subscriber_fkeys_funcs.sql \i ../sql/londiste/functions/londiste.subscriber_trigger_funcs.sql \i 
../sql/londiste/functions/londiste.quote_fqname.sql \i ../sql/londiste/functions/londiste.find_table_oid.sql \i ../sql/londiste/functions/londiste.get_last_tick.sql \i ../sql/londiste/functions/londiste.provider_add_table.sql \i ../sql/londiste/functions/londiste.provider_create_trigger.sql \i ../sql/londiste/functions/londiste.provider_notify_change.sql \i ../sql/londiste/functions/londiste.provider_remove_table.sql \i ../sql/londiste/functions/londiste.set_last_tick.sql \i ../sql/londiste/functions/londiste.subscriber_remove_table.sql \i ../sql/londiste/structure/grants.sql end; skytools-3.2.6/upgrade/Makefile0000644000000000000000000000212212426435645013376 0ustar #SQLS = v2.1.5_londiste.sql v2.1.5_pgq_core.sql v2.1.5_pgq_ext.sql #SQLS = v2.1.6_londiste.sql v2.1.6_pgq_ext.sql #SQLS = v3.0_pgq_core.sql #SQLS = londiste.upgrade_2.1_to_3.1.sql SRCS = $(addprefix src/, $(SQLS)) DSTS = $(addprefix final/, $(SQLS)) CATSQL = $(PYTHON) ../scripts/catsql.py all: $(DSTS) final/%.sql: src/%.sql $(CATSQL) $< > $@ final/londiste.upgrade_2.1_to_3.1.sql: src/londiste.2to3.sql ../sql/londiste/londiste.sql echo "begin;" > $@ cat src/londiste.2to3.sql >> $@ grep -v 'create schema' ../sql/londiste/londiste.sql >> $@ echo "commit;" >> $@ PSQL = psql -q ltest: ../sql/pgq_node/pgq_node.sql $(PSQL) -d postgres -c "drop database if exists londiste_upgrade_test" $(PSQL) -d postgres -c "create database londiste_upgrade_test" $(PSQL) -d londiste_upgrade_test -f final/pgq_core_2.1.13.sql $(PSQL) -d londiste_upgrade_test -f final/londiste.2.1.12.sql $(PSQL) -d londiste_upgrade_test -f final/pgq.upgrade_2.1_to_3.0.sql $(PSQL) -d londiste_upgrade_test -f ../sql/pgq_node/pgq_node.sql $(PSQL) -d londiste_upgrade_test -f final/londiste.upgrade_2.1_to_3.1.sql skytools-3.2.6/setup_skytools.py0000755000000000000000000001510312426435645013776 0ustar #! 
/usr/bin/env python # this script installs Python modules, scripts and sql files # custom switches for install: # --sk3-subdir install modules into "skytools-3.0" subdir # --skylog use "skylog" logging by default # non-working switches # --script-suffix= add suffix to scripts import sys, os.path, re from distutils.core import setup from distutils.extension import Extension from distutils.command.build import build from distutils.command.build_scripts import build_scripts from distutils.command.install import install from subprocess import Popen INSTALL_SCRIPTS = 1 INSTALL_SQL = 1 # don't build C module on win32 as it's unlikely to have dev env BUILD_C_MOD = 1 if sys.platform == 'win32': BUILD_C_MOD = 0 # load version buf = open("configure.ac","r").read(256) m = re.search("AC_INIT[(][^,]*,\s+([^)]*)[)]", buf) ac_ver = m.group(1) # scripts that we add suffix sfx_scripts = [ 'python/londiste.py', 'python/walmgr.py', 'scripts/data_maintainer.py', 'scripts/queue_mover.py', 'scripts/queue_splitter.py', 'scripts/scriptmgr.py', 'scripts/simple_consumer.py', 'scripts/simple_local_consumer.py', 'scripts/skytools_upgrade.py', ] # those do not need suffix (no conflict with 2.1) nosfx_scripts = [ 'python/qadmin.py', ] if not INSTALL_SCRIPTS: sfx_scripts = [] nosfx_scripts = [] # sql files we want to access from python sql_files = [ 'sql/pgq/pgq.sql', 'sql/londiste/londiste.sql', 'sql/pgq_node/pgq_node.sql', 'sql/pgq_coop/pgq_coop.sql', 'sql/pgq_ext/pgq_ext.sql', 'sql/pgq/pgq.upgrade.sql', 'sql/pgq_node/pgq_node.upgrade.sql', 'sql/londiste/londiste.upgrade.sql', 'sql/pgq_coop/pgq_coop.upgrade.sql', 'sql/pgq_ext/pgq_ext.upgrade.sql', 'upgrade/final/pgq.upgrade_2.1_to_3.0.sql', 'upgrade/final/londiste.upgrade_2.1_to_3.1.sql', ] # sql files for special occasions extra_sql_files = [ #'upgrade/final/v3.0_pgq_core.sql', ] if not INSTALL_SQL: sql_files = [] extra_sql_files = [] def getvar(name, default): try: cf = open('config.mak').read() m = re.search(r'^%s *= *(.*)' % name, cf, 
re.M) if m: return m.group(1).strip() except IOError: pass return default # don't rename scripts on win32 if sys.platform == 'win32': DEF_SUFFIX = '.py' DEF_NOSUFFIX = '.py' else: DEF_SUFFIX = '' DEF_NOSUFFIX = '' # load defaults from config.mak DEF_SUFFIX = getvar('SUFFIX', DEF_SUFFIX) DEF_SKYLOG = getvar('SKYLOG', '0') != '0' DEF_SK3_SUBDIR = getvar('SK3_SUBDIR', '0') != '0' # create sql files if they don't exist def make_sql(): for fn in sql_files: if not os.path.isfile(fn): f = open(fn, 'w') wd = os.path.dirname(fn) if fn.endswith('upgrade.sql'): base = 'structure/upgrade.sql' else: base = 'structure/install.sql' print("Creating %s" % (fn,)) cmd = [sys.executable, '../../scripts/catsql.py', base] p = Popen(cmd, stdout=f, cwd = wd) p.communicate() if p.returncode != 0: raise Exception('catsql failed') # remove .py, add suffix def fixscript(fn, dstdir, sfx): fn = os.path.basename(fn) fn2 = fn.replace('.py', sfx) if fn == fn2: return dfn = os.path.join(dstdir, fn) dfn2 = os.path.join(dstdir, fn2) if '-q' not in sys.argv: print("Renaming %s -> %s" % (dfn, fn2)) if sys.platform == 'win32' and os.path.isfile(dfn2): os.remove(dfn2) os.rename(dfn, dfn2) # rename build dir class sk3_build(build): def initialize_options(self): build.initialize_options(self) self.build_base = 'build.sk3' def run(self): build.run(self) make_sql() # fix script names in build dir class sk3_build_scripts(build_scripts): def run(self): build_scripts.run(self) for sfn in sfx_scripts: fixscript(sfn, self.build_dir, DEF_SUFFIX) for sfn in nosfx_scripts: fixscript(sfn, self.build_dir, DEF_NOSUFFIX) # wrap generic install command class sk3_install(install): user_options = install.user_options + [ ('sk3-subdir', None, 'install modules into "skytools-3.0" subdir'), ('skylog', None, 'use "skylog" logging by default'), ] boolean_options = ['sk3-subdir', 'skylog'] sk3_subdir = DEF_SK3_SUBDIR skylog = DEF_SKYLOG def run(self): # create installer_config.py with final paths fn = 
'python/skytools/installer_config.py' cf = open(fn + '.in', 'r').read() cf = cf.replace('@SQLDIR@', os.path.join(self.prefix, 'share/skytools3')) cf = cf.replace('@PACKAGE_VERSION@', ac_ver) cf = cf.replace('@SKYLOG@', self.skylog and '1' or '0') open(fn, 'w').write(cf) # move python modules if self.sk3_subdir: subdir = 'skytools-3.0' self.install_lib = os.path.join(self.install_lib, subdir) self.install_purelib = os.path.join(self.install_purelib, subdir) self.install_platlib = os.path.join(self.install_platlib, subdir) # generic install install.run(self) # check if building C is allowed c_modules = [] if BUILD_C_MOD: ext = [ Extension("skytools._cquoting", ['python/modules/cquoting.c']), Extension("skytools._chashtext", ['python/modules/hashtext.c']), ] c_modules.extend(ext) # run actual setup setup( name = "skytools", license = "ISC", version = ac_ver, maintainer = "Marko Kreen", maintainer_email = "markokr@gmail.com", url = "http://pgfoundry.org/projects/skytools/", description = "SkyTools - tools for PostgreSQL", platforms = "POSIX, MacOS, Windows", package_dir = {'': 'python'}, packages = ['skytools', 'londiste', 'londiste.handlers', 'pgq', 'pgq.cascade'], data_files = [ ('share/doc/skytools3/conf', [ 'python/conf/wal-master.ini', 'python/conf/wal-slave.ini', ]), ('share/skytools3', sql_files), #('share/skytools3/extra', extra_sql_files), ], ext_modules = c_modules, scripts = sfx_scripts + nosfx_scripts, cmdclass = { 'build': sk3_build, 'build_scripts': sk3_build_scripts, 'install': sk3_install, }, long_description = """ This is a package of tools developed at Skype for replication and failover. It includes a generic queuing framework (PgQ), easy-to-use replication implementation (Londiste), tool for managing WAL based standby servers, utility library for Python scripts, selection of scripts for specific jobs. 
""" ) skytools-3.2.6/doc/0000755000000000000000000000000012426435645011057 5ustar skytools-3.2.6/doc/scriptmgr.txt0000644000000000000000000000533212426435645013635 0ustar = scriptmgr(1) = == NAME == scriptmgr - utility for controlling other skytools scripts. == SYNOPSIS == scriptmgr.py [switches] config.ini [-a | -t service | job_name...] == DESCRIPTION == scriptmgr is used to manage several scripts together. It discovers potential jobs based on config file glob expression. From config file it gets both job_name and service type (that is the main section name, e.g. [cube_dispatcher]). For each service type there is subsection in the config how to handle it. Unknown services are ignored. == COMMANDS == === status === scriptmgr config.ini status Show status for all known jobs. === start === scriptmgr config.ini start -a scriptmgr config.ini start -t service scriptmgr config.ini start job_name1 job_name2 ... Launch script(s) that are not running. === stop === scriptmgr config.ini stop -a scriptmgr config.ini stop -t service scriptmgr config.ini stop job_name1 job_name2 ... Stop script(s) that are running. === restart === scriptmgr config.ini restart -a scriptmgr config.ini restart -t service scriptmgr config.ini restart job_name1 job_name2 ... Restart scripts. === reload === scriptmgr config.ini reload -a scriptmgr config.ini reload -t service scriptmgr config.ini reload job_name1 job_name2 ... Send SIGHUP to scripts that are running. == CONFIG == include::common.config.txt[] === scriptmgr parameters === config_list:: List of glob patterns for finding config files. Example: config_list = ~/dbscripts/conf/*.ini, ~/random/conf/*.ini === Service section parameters === cwd:: Working directory for script. args:: Arguments to give to script, in addition to `-d`. script:: Path to script. Unless script is in PATH, full path should be given. disabled:: If this service should be ignored. user:: Launch service as different unix user. Scriptmgr uses `sudo` to switch users. 
So it either needs to be run as root, or sudo config must allow it to launch daemons. === Example config file === [scriptmgr] job_name = scriptmgr_livesrv logfile = ~/log/%(job_name)s.log pidfile = ~/pid/%(job_name)s.pid config_list = ~/scripts/conf/*.ini # defaults for all service sections [DEFAULT] cwd = ~/scripts [table_dispatcher] script = table_dispatcher.py args = -v [cube_dispatcher] script = python2.4 cube_dispatcher.py disabled = 1 [pgqadm] script = ~/scripts/pgqadm.py args = ticker == COMMAND LINE SWITCHES == include::common.switches.txt[] Options specific to scriptmgr: -a, --all:: Operate on all non-disabled scripts. -t 'service', --type='service':: Operate on all non-disabled scripts of this service type. -w, --wait:: Wait for script(s) after signalling. skytools-3.2.6/doc/common.config.txt0000644000000000000000000000145312426435645014357 0ustar === Common configuration parameters === job_name:: Name for particulat job the script does. Script will log under this name to logdb/logserver. The name is also used as default for PgQ consumer name. It should be unique. pidfile:: Location for pid file. If not given, script is disallowed to daemonize. logfile:: Location for log file. loop_delay:: If continuisly running process, how long to sleep after each work loop, in seconds. Default: 1. connection_lifetime:: Close and reconnect older database connections. use_skylog:: foo. ifdef::pgq[] === Common PgQ consumer parameters === queue_name:: Queue name to attach to. No default. consumer_name:: Consumers ID to use when registering. Default: %(job_name)s endif::pgq[] skytools-3.2.6/doc/skytools_upgrade.txt0000644000000000000000000000157712426435645015230 0ustar = skytools_upgrade(1) = == NAME == skytools_upgrade - utility for upgrading Skytools code in databases. == SYNOPSIS == skytools_upgrade.py [switches] connstr [connstr ...] == DESCRIPTION == It connects to given database, then looks for following schemas: pgq:: Main PgQ code. 
pgq_ext:: PgQ batch/event tracking in remote database. londiste:: Londiste replication. If schema exists, its version is detected by querying .version() function under schema. If the function does not exists, there is some heuristics built in to differentiate between 2.1.4 and 2.1.5 version of the schemas. If detected that version is older than current, it is upgraded by applying upgrade scripts in order. == COMMAND LINE SWITCHES == include::common.switches.txt[] Options specific to skytools_upgrade: --all:: Upgrade all databases. --not-really:: Don't actually do anything. skytools-3.2.6/doc/faq.txt0000644000000000000000000000703712426435645012376 0ustar = Skytools FAQ = == Skytools == === What is Skytools? === It is bunch of database management tools we use and various frameworks / modules they depend on. Main components are `pgq` SQL module which implements generic queue in database, Python module for writing consumers for it, Londiste replication on top of them and walmgr for setting up WAL-based standby servers. == PgQ - The generic queue == === Why do queue in database? Transactional overhead? === 1. PgQ is quite likely the fastest ACID compliant queue, thanks to Postgres being pretty fast despite the "transactional overhead". Why use anything less robust? 2. We have lot of business logic in database. Events created by business transactions need to live or die with main transaction. 3. Queue used for replication purposes needs to be transactional. I think the reason people act surprised when they hear about queue in database is not that they don't care about reliability of their event transport, but that the reliable data storage mechanism - SQL databases - did not have any way to write performant queue. Now thanks to the txid/snapshot technique we have a way to write fast _and_ reliable queue, so why (care about anything less). === Could you break dependancy on Python? === There is no dependancy on Python. 
The PgQ itself is written in C / plpgsql and it appears as bunch of SQL functions under `pgq` schema. Thus it can be used from any language that can execute SQL queries. There is Python helper framework that makes writing Python consumers easier. Such framework could be written for any language. === Aren't the internals similar to Slony-I? === Yes, PgQ was created by generalizing queueing parts from Slony-I. === Dump-restore === Database which contains `pgq` schema can be dumped and restored with `pg_dump`, but extra steps must be taken because PgQ tables contains transaction id-s and snapsnot which are extracted from Postgres code. PgQ depends on transaction id values going always higher. Thus restoring database in new Postgres clusten will break PgQ if that cluster has txids smaller than they were in old cluster. Postgres interally uses rolling 32-bit txids which on export are combined with 'txid epoch', which simply is count how many times the 32-bit txid has already cycled. Thus the way to make sure new cluster has highed txids than old one is to set the epoch higher in new cluster than it was in old cluster. To see epoch in old cluster in running database: SELECT (txid_current() >> 32) as epoch; epoch ----- 1 To see epoch on shut down database: $ pg_resetxlog -n $DATADIR ... Latest checkpoint's NextXID: 0/3938 (epoch -> 0) ... To set epoch in new cluster - it must be shut down first: $ pg_resetxlog -e $NEWEPOCH $DATADIR == Londiste - The replication tool == === What type of replication it does? === Londiste does trigger-based asynchronous single-master replication, same as Slony-I. In Skytools 3.x it will support merging partitions togethers, that could be called shared-nothing multimaster replication. === What is the difference between Slony-I and Londiste? === Nothing fundamental. Both do asynchronous replication. Main difference is that Londiste consists of several relatively independent parts, unlike Slony-I where code is more tightly tied together. 
At the moment Londiste loses to Slony-I featurewise, but should be easier to use. Hopefully we can keep the simple UI when we catch up in features. === What are the limitations of Londiste === It does not support '.' and ',' in table, schema and column names. skytools-3.2.6/doc/queue_splitter3.txt0000644000000000000000000000534012426435645014757 0ustar = queue_splitter3(1) = == NAME == queue_splitter3 - PgQ consumer that transports events from one queue into several target queues == SYNOPSIS == queue_splitter3 [switches] config.ini == DESCRIPTION == queue_spliter is PgQ consumer that transports events from source queue into several target queues. `ev_extra1` field in each event shows into which target queue it must go. (`pgq.logutriga()` puts there the table name.) One use case is to move events from OLTP database to batch processing server. By using queue spliter it is possible to move all kinds of events for batch processing with one consumer thus keeping OLTP database less crowded. == QUICK-START == Basic queue_splitter setup and usage can be summarized by the following steps: 1. pgq must be installed both in source and target databases. See pgqadm man page for details. Target database must also have pgq_ext schema installed. 2. edit a queue_splitter configuration file, say queue_splitter_sourcedb_sourceq_targetdb.ini 3. create source and target queues $ pgqadm.py ticker.ini create 4. launch queue splitter in daemon mode $ queue_splitter3 queue_splitter_sourcedb_sourceq_targetdb.ini -d 5. start producing and consuming events == CONFIG == include::common.config.txt[] === queue_splitter parameters === src_db:: Source database. dst_db:: Target database. 
=== Example config file === [queue_splitter3] job_name = queue_spliter_sourcedb_sourceq_targetdb src_db = dbname=sourcedb dst_db = dbname=targetdb pgq_queue_name = sourceq logfile = ~/log/%(job_name)s.log pidfile = ~/pid/%(job_name)s.pid == COMMAND LINE SWITCHES == include::common.switches.txt[] == USECASE == How to to process events created in secondary database with several queues but have only one queue in primary database. This also shows how to insert events into queues with regular SQL easily. CREATE SCHEMA queue; CREATE TABLE queue.event1 ( -- this should correspond to event internal structure -- here you can put checks that correct data is put into queue id int4, name text, -- not needed, but good to have: primary key (id) ); -- put data into queue in urlencoded format, skip actual insert CREATE TRIGGER redirect_queue1_trg BEFORE INSERT ON queue.event1 FOR EACH ROW EXECUTE PROCEDURE pgq.logutriga('singlequeue', 'SKIP'); -- repeat the above for event2 -- now the data can be inserted: INSERT INTO queue.event1 (id, name) VALUES (1, 'user'); If the queue_splitter is put on "singlequeue", it spreads the event on target to queues named "queue.event1", "queue.event2", etc. This keeps PgQ load on primary database minimal both CPU-wise and maintenance-wise. skytools-3.2.6/doc/pgqd.txt0000644000000000000000000000243612426435645012560 0ustar = pgqq(1) = == NAME == pgqd - PGQ ticker daemon == SYNOPSIS == pgqd [ options ... ] config.file == DESCRIPTION == PgQ ticker and maintenance daemon. Works with several databases in parallel. 
== GENERAL OPTIONS == Switches: -v:: Increase verbosity -q:: No output to console -d:: Daemonize -h:: Show help -V:: Show version --ini:: Show sample config file -s:: Stop - send SIGINT to running process -k:: Kill - send SIGTERM to running process -r:: Reload - send SIGHUP to running process == CONFIG FILE === Sample configuration file [pgqd] # where to log logfile = ~/log/pgqd.log # pidfile pidfile = ~/pid/pgqd.pid ## optional parameters ## # libpq connect string without dbname= #base_connstr = # startup db to query other databases #initial_database = template1 # limit ticker to specific databases #database_list = # log into syslog #syslog = 1 #syslog_ident = pgqd #syslog_facility = local0 ## optional timeouts ## # how often to check for new databases #check_period = 60 # how often to flush retry queue #retry_period = 30 # how often to do maintentance #maint_period = 120 # how often to run ticker #ticker_period = 1 skytools-3.2.6/doc/devnotes.txt0000644000000000000000000001250712426435645013454 0ustar = Notes for developers = == Coding Style == === Python === Standard Python style with 4-space indent, no tabs. * http://python.org/dev/peps/pep-0008/[PEP-8: Style Guide to Python Code] * http://www.python.org/dev/peps/pep-0257/[PEP-257: Docstring conventions] === C === * http://lxr.linux.no/linux/Documentation/CodingStyle[Linux kernel style] - K&R with 8-space tabs. * Target is modern C (c99) - vararg macros, struct field initializers are OK. * `static inline` is perferred to macros. There may be couple places still using the historical Postgres style with half-tength tabs. Please follow if doing small patches to those files. For bigger work it may be preferable to reindent the file. === SQL === * Indent with 4 spaces. * All-lowercase (expecting syntax highlighing editor). * We use NaturalDocs for API documentation, see existing code for examples. * Functions should use OUT parameters instead of return types. * Local variables should prefixed with '_'. 
* Database clients should not access tables directly but do operations via functions. (Except when script's task is to replicate tables.) * Any sort of comma-first style is forbidden. Code should be optimized for reading not writing. == Patches == Although the developemt happens in GIT repos, the contributors are not required to publish their changes via GIT, sending patches is fine. The preferred patch format is unified diff, which is the default for git: $ git diff > patch or with plain `diff`: $ diff -ur skytools-2.1.9 skytools-my > patch == GIT usage == === Initial cloning === libusual is used as git subproject, so after inital clone submodule update should be done: $ git clone git://github.com/markokr/skytools.git $ cd skytools $ git submodule init $ git submodule update === Repos === Master Skytools repository: `git://github.com/markokr/skytools.git` Master libusual repository: `git://github.com/markokr/libusual.git Currently known developer repos are on github.com: * http://github.com/markokr[] * http://github.com/mpihlak[] === Commit style === GIT expects first line of commit message to be short summary, rest of the message in-depth explanation about commit. The short summary is used by `git shortlog`, `gitk` and various web-interfaces. So the commit message should be written in email style - first a subject line, empty line then longer details. Short summary should also contain component name or subdir that the commit touches: ------------------------------------------------------------- sql/pgq: reindent C code Several places had whitespace bugs, probably due to copy-paste. As there is no point keeping historical PG style around here, reindent with proper -kr -i8. 
------------------------------------------------------------- === Developer workflow === ==== Initial setup ==== $ git config --global user.name "Marko Kreen" $ git config --global user.email "markokr@gmail.com" Optional: make git colorful: ## make 'less' accept color codes $ export PAGER=less $ export LESS="-R" # markokr: LESS="-RgQnh2" ## make git use color $ git config --global color.branch auto $ git config --global color.diff auto $ git config --global color.pager true $ git config --global color.status true ## make log nicer $ git config --global log.decorate short $ git config --global log.abbrevCommit true Optional: activate tab-completion for git, pick one of the lines below and put it into your `.bashrc`: ------------------------------------------------------------- # 1) use unpacked source tree source $git_src_tree/contrib/completion/git-completion.bash # 2) use packaged git (preferred) source /etc/bash_completion.d/git # 3) use packaged git, turn extended completion for everything # [ markokr: buggy completion modules can be rather annoying # so it may be preferable to activate them one-by-one ] source /etc/bash_completion ------------------------------------------------------------- Optional: show current checked out branch in bash prompt, requires the completion script from above: PS1='\h:\w$(__git_ps1 " (%s)")\$ ' ==== Developement tasks ==== First, do the initial cloning as described above. 
Add your own writable repo, named 'self': $ cd skytools $ git remote add self git@github.com:${username}/skytools.git Push initial contents into it: $ git push self master Fetch changes from upstream repo into branch 'origin/master', but do not merge into local 'master': $ git fetch origin See changes in upstream repo: $ git log [-p] origin/master Merge changes from upstream repo into currently checked out branch: $ git merge origin/master Alternative: do fetch+merge in one go (assuming you are in 'master' branch): $ git pull Commit a change, push to your repo (on 'master' branch): $ edit oldfile $ edit newfile $ git add newfile $ git commit -a -m '..' $ git push self master Create a branch for your changes, starting from checked out branch $ git branch mybranch $ git checkout mybranch ## or, in one command $ git checkout -b mybranch Commit files $ edit oldfile $ edit newfile $ git add newfile $ git commit -a -m 'commit summary' ## optional: merge, or update commits relative to master branch $ git rebase -i master ## merge into master $ git checkout master $ git merge mybranch Push changes into your own public repo: $ git push self master skytools-3.2.6/doc/skytools3.txt0000644000000000000000000001105612426435645013575 0ustar Skytools 3 - cascaded replication ================================= Keep old design from Skytools 2 ------------------------------- * Worker process connects to only 2 databases, there is no everybody-to-everybody communication going on. * Worker process only pulls data from queue. - No pushing with LISTEN/NOTIFY is used for data transport. - Administrative work happens in separate process. - Can go down anytime, without affecting anything else. * Relaxed attitude about tables. - Tables can be added/removed at any time. - Initial data sync happens table-by-table, no attempt is made to keep consistent picture between tables during initial copy. 
New features in Skytools 3 -------------------------- * Cascading is implemented as generic layer on top of PgQ - *Cascaded PgQ*. - Its goal is to keep identical copy of queue contents in several nodes. - Not replication-specific - can be used for any queue. - Advanced admin operations: takeover, change-provider, pause/resume. - For terminology and technical details see here: set.notes.txt. * New Londiste features: - Parallel copy - during initial sync several tables can be copied at the same time. In 2.x the copy already happened in separate process, making it parallel was just a matter of tuning launching/syncing logic. - EXECUTE command, to run random SQL script on all nodes. The script is executed in single TX on root, and inserted as an event into the queue in the same TX. The goal is to emulate DDL AFTER TRIGGER that way. Londiste itself does no locking and no coordination between nodes. The assumption is that the DDL commands themselves do enough locking. If more locking is needed is can be added to script. - Automatic table or sequence creation by importing the structure from provider node. Activated with --create switch for add-table, add-seq. By default *everything* is copied, including Londiste own triggers. The basic idea is that the triggers may be customized and that way we avoid the need to keep track of trigger customizations. - Ability to merge replication queues coming from partitioned database. The possibility was always there but now PgQ keeps also track of batch positions, allowing loss of the merge point. - Londiste now uses the intelligent log-triggers by default. The triggers were introduced in 2.1.x, but were not on by default. Now they are used by default. - Londiste processes events via 'handlers'. Thus we can do table partitioning in Londiste, instead of custom consumer, which means all Londiste features are available in such situation - like proper initial COPY. To see list of them: `londiste3 x.ini show-handlers`. 
- Target table can use different name (--dest-table) * New interactive admin console - qadmin. Because long command lines are not very user-friendly, this is an experiment on interactive console with heavy emphasis on tab-completion. * New multi-database ticker: `pgqd`. It is possible to set up one process that maintains all PgQ databases in one PostgreSQL instance. It will auto-detect both databases and whether they have PgQ installed. This also makes core PgQ usable without need for Python. Minor improvements ------------------ * sql/pgq: ticks also store last sequence pos with them. This allowed also to move most of the ticker functionality into database. Ticker daemon now just needs to call SQL function periodically, it does not need to keep track of seq positions. * sql/pgq: Ability to enforce max number of events that one TX can insert. In addition to simply keeping queue healthy, it also gives a way to survive bad UPDATE/DELETE statements with buggy or missing WHERE clause. * sql/pgq: If Postgres has autovacuum turned on, internal vacuuming for fast-changing tables is disabled. * python/pgq: pgq.Consumer does not register consumer automatically, cmdline switches --register / --unregister need to be used for that. * londiste: sequences are now pushed into queue, instead pulled directly from database. This reduces load on root and also allows in-between nodes that do not have sequences. * psycopg1 is not supported anymore. * PgQ does not handle "failed events" anymore. * Skytools 3 modules are parallel installable with Skytools 2. Solved via loader module (like http://faq.pygtk.org/index.py?req=all#2.4[pygtk]). import pkgloader pkgloader.require('skytools', '3.0') import skytools Further reading --------------- * http://skytools.projects.postgresql.org/skytools-3.0/[Documentation] for skytools3. 
skytools-3.2.6/doc/set.notes.txt0000644000000000000000000001306712426435645013551 0ustar = Technical notes about cascaded queuing = == Terms == set:: Group of nodes that distribute a single queue. In addition to copying events around, they also keep same batch boundaries and tick_ids. node:: A database that participates in cascaded copy of a queue. provider:: Node that provides queue data to another. subscriber:: Node that receives queue data from another. == Goals == * Main goals: - Nodes share same queue structure - in addition to events, also batches and their tick_ids are same. That means they can change their provider to any other node in set. - (Londiste) Queue-only nodes. Cannot be initial providers to other nodes, because initial COPY cannot be done. * Extra goals: - Combining data from partitioned plproxy db's. - (Londiste) Data-only nodes, without queue - leafs. Cannot be providers to other nodes. == Node types == root:: Place where queue data is generated. branch:: * Carries full contents of the queue. * (Londiste) May subscribe to all/some/none of the tables. * (Londiste) Can be provider for initial copy only if subscribes to table leaf:: Data-only node. Events are replayed, but no queue, thus cannot be provider to other nodes. Nodes where sets from partitions are merged together are also tagged 'leaf', because in per-partition set it cannot be provider to other nodes. merge-leaf:: [Does not exist as separate type, detected as 'leaf' that has 'combined_queue' set.] Exists in per-partition set. - Does not have its own queue. - (Londiste) Initial COPY is done with --skip-truncate, - Event data is sent to combined queue. - tick_id for each batch is sent to combined queue. - Queue reader from partition to combined-failover must lag behind combined queue coming from combined-root combined-root:: [Does not exist as separate type, detected as 'root' that has 'leaf's with 'combined_queue' set.] - Master for combined queue. 
Received data from several per-partition queues. - Also is merge-leaf in every per-partition queue. - Queue is filled directly from partition queues. combined-failover:: [Does not exist as separate type, detected as 'branch' that has 'leaf's with 'combined_queue' set.] - participates in combined-set, receives events. - also is queue-only node in every part-set. - but no processing is done, just tracking == Node behaviour == Consumer behaviour | ROOT | BRANCH | LEAF | C-ROOT | C-BRANCH | M-LEAF => C-ROOT | M-LEAF => C-BRANCH -------------------------------+------+--------+------+--------+----------+------------------+-------------------- read from queue | n | yes | yes | n | yes | yes | yes event copy to queue | n | yes | n | n | yes | yes, to c-set | n event processing | n | yes | yes | n | yes | n | n send tick_id to queue | n | n | n | n | n | yes | n send global_watermark to queue | yes | n | n | yes | n | n | n send local watermark upwards | n | yes | yes | n | yes | yes | yes wait behind combined set | n | n | n | n | n | n | yes == Design Notes == * Any data duplication should be avoided. * Only duplicated table is for node_name -> node_connstr mappings. For others there is always only one node responsible. * Subscriber gets its own provider url from database, so switching to another provider does not need config changes. * Ticks+data can only be deleted if all nodes have already applied it. - Special consumer registration on all queues - ".global_watermark". This avoids PgQ from deleting old events. - Nodes propagate upwards their lowest tick: local_watermark - Root sends it's local watermark as "pgq.global-watermark" event to the queue. - When branch/leaf gets new watermark event, it moves the ".global_watermark" registration. 
== Illustrations == === Simple case === +-------------+ +---------------+ | S1 - root |----->| S2 - branch | +-------------+ +---------------| | | V +-------------+ +-------------+ | S3 - branch |----->| S4 - leaf | +-------------+ +-------------+ (S1 - set 'S', node '1') On the loss of S1, it should be possible to direct S3 to receive data from S2. On the loss of S3, it should be possible to direct S4 to receive data from S1/S2. === Complex case - combining partitions === +----+ +----+ +----+ +----+ | A1 | | B1 | | C1 | | D1 | +----+ +----+ +----+ +----+ | | | | | | | | +-----------------------+ | | | +-->| S1 - combined-root | | | +----------->| | +--------------+ | +-------------------->| A2/B2/C2/D2 - |--->| S3 - branch | +----------------------------->| merge-leaf | +--------------+ | | | | +-----------------------+ | | | | | | | | | V | | | | +------------------------+ | | | +-->| S2 - combined-failover | | | +----------->| | | +-------------------->| A3/B3/C3/D3 - | +----------------------------->| merge-leaf | +------------------------+ On the loss of S1, it should be possible to redirect S3 to S2 and ABCD -> S2 -> S3 must stay in sync. skytools-3.2.6/doc/sql-grants.txt0000644000000000000000000000201312426435645013707 0ustar = SQL permissions (draft) = == Setup == Currently following no-login roles are created during upgrade: `pgq_reader`, `pgq_writer`, `pgq_admin`, `londiste_reader`, `londiste_writer`. Actual grants are not applied to functions, instead default `public:execute` grants are kept. New grants can be applied manually: newgrants_.sql:: applies new rights, drop old public access oldgrants_.sql:: restores old rights - public execute privilege to all functions == New roles == pgq_reader:: Can consume queues (source-side) pgq_writer:: Can write into queues (source-side / dest-side) Can use `pgq_node`/`pgq_ext` schema as regular consumer (dest-side) pgq_admin:: Admin operations on queues, required for CascadedWorker on dest-side. 
Member of `pgq_reader` and `pgq_writer`. londiste_reader:: Member of `pgq_reader`, needs additional read access to tables. (source-side) londiste_writer:: Member of `pgq_admin`, needs additional write access to tables. (dest-side) skytools-3.2.6/doc/londiste3.txt0000644000000000000000000002660412426435645013534 0ustar = londiste3(1) = == NAME == londiste3 - tool for managing trigger-based replication for PostgreSQL. == SYNOPSIS == londiste3 command [subcommand] [options] == DESCRIPTION == Londiste allows you to setup and administer the replication, and is run as a daemon to handle the replication itself. (Londiste is just a complex PgQ Consumer). See <> below to start your first trigger-based replication in a few steps. The 'command' is one of Node, Replication, Information or Internal commands listed below. Londiste introduces the notion of 'takeover'. It is the action when a local node takes over the work of another node. The other node can be a root node or a branch node and it can be dead or alive when the action is run. Londiste also allows (among many other features): cascading replication, partial replication and custom handlers for replication. == GENERAL OPTIONS == -V, --version:: Print version info and exit. -h, --help:: Show this help message and exit. -q, --quiet:: Log only errors and warnings. -v, --verbose:: Log verbosely. -d, --daemon:: Run in daemon mode (go background). == SPECIFIC OPTIONS == --ini:: Display sample ini file. --set="'param=val[,param=value]'":: Override config setting. == DAEMON OPTIONS == -r, --reload:: Reload config (send SIGHUP). -s, --stop:: Stop program safely (send SIGINT). -k, --kill:: Kill program immediately (send SIGTERM). == REPLICATION EXTRA ARGUMENTS == --rewind:: Change queue position according to destination. --reset:: Reset queue position on destination side. == NODE INITIALIZATION COMMANDS == Initialization commands will set up "public connect string" for current node. 
It is a connect string that other nodes will use to connect to current node. The local Londiste itself uses 'db' option from config file to connect to local node, which can have different user rights than scripts coming over public connect string. Connect strings can be set in either command line or config file. Command line overrides config. Setting them up in config might be more comfortable. See `londiste3 --ini` for details. === create-root [] === Initializes a Master node. The is the name of the node, it should be unique. === create-branch [] [--provider=] === Initializes a Slave node which can be used as a reference for other nodes. The is the name of the node, it should be unique. The argument is the connection string to the database on the current node and is the connection string to the provider database (it can be a root node or a branch node). === create-leaf [] [--provider=] === Initializes a slave node which can not be used as a reference for other nodes. The is the name of the node, it should be unique. The argument is the connection string to the database on the current node and is the connection string to the provider database (it can be a root node or a branch node). --merge='qname':: combined queue name == NODE ADMINISTRATION COMMANDS == === pause === Pause the consumer: the replication of the events is stopped and can be resumed later. === resume === When the consumer has been paused, let it replay again. === change-provider === Make become the new provider for the current node. TODO: londiste.py need update (param change from --provider) === takeover [--target=] [--all] [--dead] === This command allows to achieve failover and switchover for any of your providers (root or branch nodes). --target='tonode':: Target node of the takeover. --all:: In addition to take over the work from the 'fromnode', make other nodes change their provider to the current node. --dead:: Don't wait to take the new role and flag the 'fromnode' as dead. 
--dead='deadnode':: Assume node is dead. TODO: why use this one? --dead-root:: Old node was root. --dead-branch:: Old node was branch === resurrect === In case root was down and taken over with `--dead-root`, this command fixes queue contents on that old root to match the rest of cascade. Events that did not propagate to rest of the cascade before failure (lost events) are dumped into file in JSON format and deleted from queue. Then the node is registered into cascade and its worker will be paused. It requires that there is another active root in cascade and there is consumer named `NODENAME.gravestone` registered on same node, it uses that to get position where rest of the cascade moved on. It does not touch actual tables, which means there must be external mechanism to survive unsynced tables. Options: * Ignore lost events. May need trigger/rule on tables to handle conflicts. * Replay the lost events on new root. May need trigger/rule on tables to handle conflicts. * Roll back table changes. May need old version of row stored in events. (Achieved with `backup` parameter to `pgq.logutriga`) === drop-node === Remove the node from the Londiste replication. Londiste triggers on the node are removed but Londiste or PgQ are not removed. === tag-dead === Tag the node as dead, the command can be run from any node in the replication. === tag-alive === Tag the node as alive, the command can be run from any node in the replication. == INFORMATION COMMANDS == === status === Show status of the replication viewed by the current node. The output is a tree of the members of the replication with their lags, last tick, status and the number of tables in state: ok/half/ignored (replicated, initial copy not finished, table not replicated locally). === members === Show members of the replication viewed by the current node. Output the node's name, status and node location (connection string to the node). 
=== show-consumers [--node] === TODO: command is not working == REPLICATION DAEMON COMMAND == === worker === Replay events to subscriber: it is needed to make the replication active as it will start to replay the events. == REPLICATION ADMINISTRATION COMMANDS == === add-table
[args] === Add the table to the replication. See <> below for the list of possible arguments. === remove-table
=== Remove the table from the replication. === add-seq [args] === Add the sequence to the replication. See <> below for the list of possible arguments. === remove-seq === Remove the sequence from the replication. === tables === Show all tables on provider. === seqs === Show all sequences on provider. === missing === List tables subscriber has not yet attached to. === resync
=== Do full copy of the table, again. == ADD ARGUMENTS [[add_args]] == --all:: Include all possible tables. --wait-sync:: Wait until newly added tables are synced fully. --dest-table='table':: Redirect changes to different table. --force:: Ignore table differences. --expect-sync:: No copy needed. --skip-truncate:: Keep old data. --create:: Create table/sequence if not exist, with minimal schema. --create-full:: Create table/sequence if not exist, with full schema. --trigger-flags='trigger_flags':: Trigger creation flags, see below for details. --trigger-arg='trigger_arg':: Custom trigger arg (can be specified multiple times). --no-triggers:: Don't put triggers on table (makes sense on leaf node). --handler='handler':: Custom handler for table. --handler-arg='handler_arg':: Argument to custom handler. --copy-node='NODE_NAME':: Do initial copy from that node instead of the provider. Useful if provider does not contain table data locally or is simply under load. --merge-all:: Merge tables from all source queues. --no-merge:: Don't merge tables from source queues. --max-parallel-copy='max_parallel_copy':: Max number of parallel copy processes. --skip-non-existing:: Skip objects that do not exist. Trigger creation flags (default: AIUDL): - I - ON INSERT - U - ON UPDATE - D - ON DELETE - Q - use pgq.sqltriga() as trigger function - L - use pgq.logutriga() as trigger function - B - BEFORE - A - AFTER - S - SKIP == REPLICATION EXTRA COMMANDS == === check === Compare table structure on both sides. === fkeys === Print out fkey drop/create commands. === compare [
] === Compare table contents on both sides. --count-only:: Just count rows, do not compare data. === repair [
] [--force] === Repair data on subscriber. --force:: Ignore lag. === execute [filepath] === Execute SQL files on each node of the cascaded queue. The SQL file is executed locally in single transaction and inserted into queue in same transaction. Thus guaranteeing that it will be replayed in subscriber databases at correct position. The filename is stored in `londiste.applied_execute` table, and checked before execution. If same filename already exists, the SQL execution is skipped. ==== SQL meta-data attributes ==== SQL file can contain attributes that limit where the SQL is executed: --*-- --*-- Local-Table: mytable, othertable, --*-- thirdtable --*-- Local-Sequence: thisseq --*-- The magic comments are searched only in file start, before any actual SQL statement is seen. Empty lines and lines with regular SQL comments are ignored. Supported keys: Local-Table:: Table must be added to local node with `add-table`. Local-Sequence:: Sequence must be added to local node with `add-seq`. Local-Destination:: Table must be added to local node and actual destination table must exist. This is for cases where table is added to some nodes with handler that does not need actual table to exist. Need-Table:: Physical table must exist in database. It does not matter if it is replicated or not. Need-Sequence:: Sequence must exist in database. Need-Function:: Database function must exist. The function name is in form `function_name(nargs)`. If the `(nargs)` portion is missed then nargs is taken as 0. Need-View:: A view must exist in database. Need-Schema:: Schema must exist in database. Londiste supports table renaming, where table is attached to queue with one name but events are applied to local table with different name. 
To make this work with EXECUTE, the Local-Table and Local-Destination support tag replacement, where queue's table name that is mentioned in attribute is replaced with actual table name in local database: --*-- Local-Table: mytable ALTER TABLE @mytable@ ...; === show-handlers ['handler'] === Show info about all or a specific handler. === wait-sync === Wait until all added tables are copied over. === wait-provider === Wait until local node passes latest queue position on provider. === wait-root === Wait until local node passes latest queue position on root. == INTERNAL COMMAND == === copy === Copy table logic. == EXIT STATUS == 0:: Successful program execution. == ENVIRONMENT == PostgreSQL environment variables can be used. == EXAMPLES [[examples]] == Londiste is provided with HowTos to help you make your first steps: - How to set up simple replication. - How to set up cascaded replication. - How to set up table partitioning (handlers). skytools-3.2.6/doc/simple_consumer3.txt0000644000000000000000000000107312426435645015110 0ustar = simple_consumer3(1) = == NAME == simple_consumer3 - PgQ consumer that executes query for each event == SYNOPSIS == simple_consumer3.py [switches] config.ini == DESCRIPTION == For each event in batch it will execute query, filling event values into it. Transactionality: query is executed in autocommit mode, no batch tracking is done. That means on failure, whole batch is fetched and all events are processed again. == CONFIG == Run `simple_consumer3 --ini` to see commented config template. 
== COMMAND LINE SWITCHES == include::common.switches.txt[] skytools-3.2.6/doc/howto/0000755000000000000000000000000012426435645012217 5ustar skytools-3.2.6/doc/howto/londiste3_simple_rep_howto.txt0000644000000000000000000001442112426435645020325 0ustar = Setting up simple Londiste3 replication = == Introduction == This sample does the following actions: * sets up the databases - creates a database 'l3simple', on host which will be master - populates this with pgbench schema and data - adds primary and foreign keys to make the db more realistic - makes a copy of the database on another node to be used as slave * sets up replication from the master to slave database - creates the root node on the master node - creates a leaf node on the slave node - starts the ticker daemon on the master node - adds all tables to replication set on both databases - waits for the replication to complete It also runs pgbench to test that the replication actually happens and works properly. == Set up schema for root database == === Create database === Run the following SQL: ---- CREATE DATABASE l3simple; ---- === Set up pgbench schema === In this HowTo we are using pgbench for setting up the schema, populating it with sample data and later running SQL loads to be replicated. Run command : ---- pgbench -i -s 2 -F 80 l3simple ---- === And add primary and foreign keys needed for replication === Standard pgbench schema lacks Primary Key on the history table. As Londiste need primary keys we add one. We also add Foreign Keys between tables, as these may help detect possible replication failures. create file /tmp/prepare_pgbenchdb_for_londiste.sql with the following ... 
---- -- add primary key to history table ALTER TABLE pgbench_history ADD COLUMN hid SERIAL PRIMARY KEY; -- add foreign keys ALTER TABLE pgbench_tellers ADD CONSTRAINT pgbench_tellers_branches_fk FOREIGN KEY(bid) REFERENCES pgbench_branches; ALTER TABLE pgbench_accounts ADD CONSTRAINT pgbench_accounts_branches_fk FOREIGN KEY(bid) REFERENCES pgbench_branches; ALTER TABLE pgbench_history ADD CONSTRAINT pgbench_history_branches_fk FOREIGN KEY(bid) REFERENCES pgbench_branches; ALTER TABLE pgbench_history ADD CONSTRAINT pgbench_history_tellers_fk FOREIGN KEY(tid) REFERENCES pgbench_tellers; ALTER TABLE pgbench_history ADD CONSTRAINT pgbench_history_accounts_fk FOREIGN KEY(aid) REFERENCES pgbench_accounts; ---- then load it into database: ---- psql l3simple -f /tmp/prepare_pgbenchdb_for_londiste.sql ---- Create and populate target database: ---- psql -d postgres -c "CREATE DATABASE l3simple_db2;" pg_dump -s l3simple_db1 | psql l3simple_db2 ---- Create configuration file st3simple/st3_l3simple_primary.ini ---- [londiste3] job_name = st3_l3simple_db1 db = dbname=l3simple_db1 queue_name = replika logfile = st3simple/log/st3_l3simple_db1.log pidfile = st3simple/pid/st3_l3simple_db1.pid ---- REMINDER: open trust to the leaf host in pg_hba.conf, or add a user to auth as and include in the connstring Create Londiste root node: ---- londiste3 st3simple/st3_l3simple_primary.ini create-root node1 "dbname=l3simple host=10.199.59.194" ---- Run worker daemon for root node: ---- londiste3 -d st3simple/st3_l3simple_primary.ini worker ---- Create configuration file st3simple/st3_l3simple_leaf.ini for worker daemon on target node: ---- [londiste3] job_name = st3_l3simple_leaf db = dbname=l3simple queue_name = replika logfile = st3simple/log/st3_l3simple_leaf.log pidfile = st3simple/pid/st3_l3simple_leaf.pid ---- Initialize node in target database: ---- londiste3 st3simple/st3_l3simple_leaf.ini create-leaf node2 dbname=l3simple --provider="dbname=l3simple host=10.199.59.194" ---- Launch 
worker daemon for target database: ---- londiste3 -d st3simple/st3_l3simple_leaf.ini worker ---- Create config file `st3simple/pgqd.ini` for PgQ ticker daemon: ---- [pgqd] logfile = st3simple/log/pgqd.log pidfile = st3simple/pid/pgqd.pid ---- Launch ticker daemon: ---- pgqd -d st3simple/pgqd.ini ---- To generate some data traffic on the master database while replicating, run the following command in background: ---- pgbench -T 120 -c 5 l3simple -f /tmp/throttled.pgbench ---- The /tmp/throttled.pgbench contains the standard pgbench workload, except that there are random length waits between commands. Now add all the tables to replication, first on root node and then on the leaf: Run command : ---- londiste3 st3simple/st3_l3simple_primary.ini add-table --all londiste3 st3simple/st3_l3simple_leaf.ini add-table --all ---- == Checking and testing == To test our newly set up replication The following command will run pgbench full speed with 5 parallel database connections generating database traffic for 10 seconds: ---- pgbench -T 10 -c 5 l3simple ---- After this is done, you can check that the tables on both sides have the same data: ---- londiste3 st3simple/st3_l3simple_leaf.ini compare ---- Compare command will establish the same logical point in time on provider and subscriber nodes and then count and checksum the rows on both sides. 
The result will look like this: ---- 2011-12-25 08:24:42,138 29189 INFO Locking public.pgbench_accounts 2011-12-25 08:24:42,147 29189 INFO Syncing public.pgbench_accounts 2011-12-25 08:24:45,233 29189 INFO Counting public.pgbench_accounts 2011-12-25 08:24:46,154 29189 INFO srcdb: 200000 rows, checksum=3864719477 2011-12-25 08:24:46,953 29189 INFO dstdb: 200000 rows, checksum=3864719477 2011-12-25 08:24:46,961 29189 INFO Locking public.pgbench_branches 2011-12-25 08:24:46,965 29189 INFO Syncing public.pgbench_branches 2011-12-25 08:24:50,528 29189 INFO Counting public.pgbench_branches 2011-12-25 08:24:50,549 29189 INFO srcdb: 2 rows, checksum=-82377983 2011-12-25 08:24:50,556 29189 INFO dstdb: 2 rows, checksum=-82377983 2011-12-25 08:24:50,568 29189 INFO Locking public.pgbench_history 2011-12-25 08:24:50,572 29189 INFO Syncing public.pgbench_history 2011-12-25 08:24:53,641 29189 INFO Counting public.pgbench_history 2011-12-25 08:24:53,660 29189 INFO srcdb: 1310 rows, checksum=-34927328558 2011-12-25 08:24:53,670 29189 INFO dstdb: 1310 rows, checksum=-34927328558 2011-12-25 08:24:53,675 29189 INFO Locking public.pgbench_tellers 2011-12-25 08:24:53,677 29189 INFO Syncing public.pgbench_tellers 2011-12-25 08:24:56,733 29189 INFO Counting public.pgbench_tellers 2011-12-25 08:24:56,737 29189 INFO srcdb: 20 rows, checksum=518235101 2011-12-25 08:24:56,740 29189 INFO dstdb: 20 rows, checksum=518235101 ---- The "checksum" is computed by adding up hashtext() sums for all database rows. == Done == The setup of simple 2 node cluster is done. 
skytools-3.2.6/doc/howto/setup_walmgr_replication.txt0000644000000000000000000003316312426435645020070 0ustar = Setting up backup and streaming replication with walmgr3 = Hannu Krosing == Introduction == This is a HowTo for setting up PostgreSQL for backup and then continue to setting up both wal-based and streaming' replication using waplmgr3 The samle commandlines and configuration files are produced by running command ----- python WALManager.py ----- in directory python/testwrappers. This test/sample * sets up a postgresql master server using directory /tmp/test_master * creates a sample database tyhere using pgbench * creates walmgr3 master configuration file /tmp/test_master/wal-master.ini * creates a directory and configuration file "on slave" - /tmp/test_slave/wal-slave.ini * runs walmgr3 setup command, which modifies postgresql.conf file on master server for replication * restarts master server for changes of "walmgr3 ... setup" to take effect * runs walmgr3 backup command, which creates a backup of master database and wal files on slace server * modifies master's postgresql.conf and pg_hba.conf file some more for enabling Streaming Replication * runs walmgr3 restore command ON SLAVE which moves backup directory into the right place for slave server and then starts the slave server * verifies that replication is running by running a SQL UPDATE query on master and checking the results on slave. == Preparation == For things to work you need to have passwordless ssh access from master to slave set up using public/private key pair This can be done using walmgr3 ---- 1. Set up passwordless ssh-key on Master and write configuration file master$ walmgr3 --ssh-keygen --init-master --slave 2. 
Set up passwordless ssh authentication from Master to Slave and write configuration file on Slave slave$ walmgr3 --init-slave --ssh-add-key=/tmp/id_rsa.pub ---- or directly from commandline ---- master$ test -f ~/.ssh/id_dsa.pub || ssh-keygen -t dsa master$ cat ~/.ssh/id_dsa.pub | ssh slave cat \>\> .ssh/authorized_keys ---- == setting up an population master database === This part is for playing with walmgr3 without disturbing your existing databases. You can skip this part, if you already have a database you want to replicate. === create database server === Create database master server directory structure under /tmp/test_master by running the command : ---- /usr/lib/postgresql/9.1/bin/initdb -D /tmp/test_master ---- This database wil run as the user who was logged in at the time of creation, vs. the default user 'postgres' Change the port and socket directory (and set up some logging if you want to see what's going on in the database) Edit /tmp/test_master/postgresql.conf and set up the following ---- unix_socket_directory = /tmp port = 55401 # optional, for logging log_connections = on log_disconnections = on ---- Now you are ready to start up this server ---- /usr/lib/postgresql/9.1/bin/pg_ctl -D /tmp/test_master -l /tmp/test_master/postgresql.log start ---- you can use "tail /tmp/test_master/postgresql.log" to check that server started up correctly, and ---- psql -h /tmp -p 55401 -l ---- to check that it accepts connections === create database and generate some data for it === Create database to be used ---- createdb pgbdb; ---- and initialise a pgbench database structure and data in this database ---- /usr/lib/postgresql/9.1/bin/pgbench -i -s 1 -F 80 pgbdb -h /tmp -p 55401 ---- == Setting up the replication == OK. 
Now we have a database server to replicate, so lets configure walmgr3 === setting up master === First we need a master configuration file /tmp/test_master/wal-master.ini : ---- [walmgr] job_name = wal-master logfile = /tmp/test_master/%(job_name)s.log pidfile = /tmp/test_master/%(job_name)s.pid use_skylog = 0 master_db = port=55401 host=/tmp dbname=template1 master_data = /tmp/test_master master_config = /tmp/test_master/postgresql.conf master_bin = /usr/lib/postgresql/9.1/bin/ # set this only if you can afford database restarts during setup and stop. # master_restart_cmd = pg_ctlcluster 9.1 main restart slave = 127.0.0.1 slave_config = /tmp/test_slave/wal-slave.ini walmgr_data = /tmp/test_slave_walmanager/backup/ completed_wals = %(walmgr_data)s/logs.complete partial_wals = %(walmgr_data)s/logs.partial full_backup = %(walmgr_data)s/data.master config_backup = %(walmgr_data)s/config.backup # syncdaemon update frequency loop_delay = 10.0 # use record based shipping available since 8.2 use_xlog_functions = 0 # pass -z to rsync, useful on low bandwidth links compression = 0 # keep symlinks for pg_xlog and pg_log keep_symlinks = 1 # tell walmgr to set wal_level to hot_standby during setup hot_standby = 1 # periodic sync #command_interval = 600 #periodic_command = /var/lib/postgresql/walshipping/periodic.sh ---- The things to takew notice hera ar that * walmgr_data is a directory _on_the_slave_host_ * it is a bad idea to put slave_config in slave data directory (that would be /tmp/test_slave/data/ as defined in wal-slave.ini below) as then it gets overwritten when doing the restore. 
=== setting up slave === You also need a walmgr3 conf file on slave, /tmp/test_slave/wal-slave.ini : ---- [walmgr] job_name = wal-standby logfile = /tmp/test_slave_walmanager/%(job_name)s.log use_skylog = 0 slave_data = /tmp/test_slave/data slave_bin = /usr/lib/postgresql/9.1/bin/ slave_stop_cmd = /usr/lib/postgresql/9.1/bin//pg_ctl -D /tmp/test_slave/data stop slave_start_cmd = /usr/lib/postgresql/9.1/bin//pg_ctl -D /tmp/test_slave/data -l /tmp/test_slave/data/postgresql.log start #slave_config_dir = /tmp/test_slave slave_config_dir = /tmp/test_slave/data walmgr_data = /tmp/test_slave_walmanager/backup/ completed_wals = %(walmgr_data)s/logs.complete partial_wals = %(walmgr_data)s/logs.partial full_backup = %(walmgr_data)s/data.master config_backup = %(walmgr_data)s/config.backup backup_datadir = no keep_backups = 0 # archive_command = # primary database connect string for hot standby -- enabling # this will cause the slave to be started in hot standby mode. primary_conninfo = host=127.0.0.1 port=55401 host=/tmp ---- === Configuring PostgreSQL for replication using walmgr3 === Running the command : ---- walmgr3 /tmp/test_master/wal-master.ini setup ---- Modifies master postgresql ini file (/tmp/test_master/postgresql.conf) with these values ---- wal_level = 'hot_standby' archive_mode = 'on' archive_command = '/usr/local/bin/walmgr3 /tmp/test_master/wal-master.ini xarchive %p %f' ---- To enable streaming replication, you will need to see that max_wal_senders is above 0 in /tmp/test_master/postgresql.conf ---- setting: max_wal_senders = 3 ---- And you need to specifically enable the access to "replication" in /tmp/test_master/pg_hba.conf (Replication has to be enabled by name, database wildcard * does not cover it) ---- local replication ubuntu trust ---- it is "local" here, as this test/sample does the replication over a local socket, follow the comments in pg_hba.conf for your case. 
And again, for the changes to take effect, you need to restart the server ---- /usr/lib/postgresql/9.1/bin/pg_ctl -D /tmp/test_master restart ---- == Making the backup == Once the configuration files are in place, making a backup is as simple as running ---- walmgr3 /tmp/test_master/wal-master.ini backup ---- If all goes well, this is what gets output (or written to log files, if so configured) ---- 0 2012-01-27 16:58:31,464 30870 INFO Backup lock obtained. 2012-01-27 16:58:31,485 30750 INFO Execute SQL: select pg_start_backup('FullBackup'); [port=55401 host=/tmp dbname=template1] 2012-01-27 16:58:36,779 30750 INFO Checking tablespaces 2012-01-27 16:58:36,786 30750 INFO pg_log does not exist, skipping 2012-01-27 16:58:36,873 30750 INFO Backup conf files from /tmp/test_master 2012-01-27 16:58:38,599 31256 INFO First useful WAL file is: 000000010000000000000002 2012-01-27 16:58:45,442 31633 INFO Backup lock released. 2012-01-27 16:58:45,461 30750 INFO Full backup successful ---- and there will be a copy of your master's data directory on the slave host under /tmp/test_slave_walmanager/backup/data.master, WAL files in /tmp/test_slave_walmanager/backup/logs.complete/ and copies of configuration files in /tmp/test_slave_walmanager/backup/config.backup/ === Backup is done === If your aim is to just make copies, you can stop this howto here == Starting a Hot Standby replica == === Setting Up the replica === If you want your replica to be usable for read-only queries, and not just report "server is starting up" when you try to connect, then your slave postgresql.conf needs to have "hot_standby = on" set: In our sample we also change the port, so we can run both servers on the same host. 
Edit /tmp/test_slave_walmanager/backup/config.backup/postgresql.conf ---- hot_standby = on port = 55402 ---- === starting the replica === You restore backup to your replica server and start it in one command ---- walmgr3 /tmp/test_slave/wal-slave.ini restore ---- Here is what it outputs if everything runs ok ---- 0 server starting 2012-01-27 16:58:45,709 31636 WARNING backup_datadir is disabled, deleting old data dir 2012-01-27 16:58:45,709 31636 INFO Move /tmp/test_slave_walmanager/backup//data.master to /tmp/test_slave/data 2012-01-27 16:58:45,766 31636 INFO Write /tmp/test_slave/data/recovery.conf 2012-01-27 16:58:45,768 31636 INFO Restoring configuration files 2012-01-27 16:58:45,771 31636 INFO Starting postmaster: /usr/lib/postgresql/9.1/bin//pg_ctl -D /tmp/test_slave/data -l /tmp/test_slave/data/postgresql.log start ---- Now you have a streaming replica running at port 55402 and master at port 55401 == Testing replication == You can test that replication is really working by opening connections to both master and slave and then watching changes done on master appear instantaneously on the slave: Check the initial replicated state on slave ---- ubuntu@ubuntu-VirtualBox:~/skytools-markokr/python/testwrappers$ psql -h /tmp -p 55402 pgbdb psql (9.1.1) Type "help" for help. pgbdb=# select * from pgbench_tellers; tid | bid | tbalance | filler -----+-----+----------+-------- 1 | 1 | 0 | 2 | 1 | 0 | 3 | 1 | 0 | 4 | 1 | 0 | 5 | 1 | 0 | 6 | 1 | 0 | 7 | 1 | 0 | 8 | 1 | 0 | 9 | 1 | 0 | 10 | 1 | 0 | (10 rows) ---- Update the pgbench_tellers table on master ---- pgbdb=# \q ubuntu@ubuntu-VirtualBox:~/skytools-markokr/python/testwrappers$ psql -h /tmp -p 55401 pgbdb psql (9.1.1) Type "help" for help. 
pgbdb=# update pgbench_tellers set filler = random(); UPDATE 10 pgbdb=# select * from pgbench_tellers; tid | bid | tbalance | filler -----+-----+----------+-------------------------------------------------------------------------------------- 1 | 1 | 0 | 0.755602441262454 2 | 1 | 0 | 0.130802480038255 3 | 1 | 0 | 0.725358869414777 4 | 1 | 0 | 0.65205558296293 5 | 1 | 0 | 0.0436737341806293 6 | 1 | 0 | 0.797202748246491 7 | 1 | 0 | 0.909699931740761 8 | 1 | 0 | 0.981106289196759 9 | 1 | 0 | 0.656265312805772 10 | 1 | 0 | 0.759600875433534 (10 rows) pgbdb=# \q ---- And check that it is the same on slave ---- ubuntu@ubuntu-VirtualBox:~/skytools-markokr/python/testwrappers$ psql -h /tmp -p 55402 pgbdb psql (9.1.1) Type "help" for help. pgbdb=# select * from pgbench_tellers; tid | bid | tbalance | filler -----+-----+----------+-------------------------------------------------------------------------------------- 1 | 1 | 0 | 0.755602441262454 2 | 1 | 0 | 0.130802480038255 3 | 1 | 0 | 0.725358869414777 4 | 1 | 0 | 0.65205558296293 5 | 1 | 0 | 0.0436737341806293 6 | 1 | 0 | 0.797202748246491 7 | 1 | 0 | 0.909699931740761 8 | 1 | 0 | 0.981106289196759 9 | 1 | 0 | 0.656265312805772 10 | 1 | 0 | 0.759600875433534 (10 rows) pgbdb=# ----- The replication is done now! skytools-3.2.6/doc/howto/londiste3_cascaded_rep_howto.txt0000644000000000000000000001722212426435645020565 0ustar = How to Set up Cascaded Replication in Londiste = == Basic cluster setup == Basic configuration for cascaded replication setup. The configuration file for ticker (pgqd) restricting ticker to only the 5 replicated databases: ---- $ cat conf/pgqd.ini [pgqd] database_list = db1,db2,db3,db4,db5 logfile = log/pgqd.log pidfile = pid/pgqd.pid ---- The ini files for databases are are created similar to the one below, only the "db1" part changes. 
---- $ cat conf/londiste_db1.ini [londiste3] job_name = londiste_db1 db = dbname=db1 queue_name = replika logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid pgq_autocommit = 1 pgq_lazy_fetch = 0 ---- After creating the ini files we are ready to install londiste3 and initialize nodes. ---- $ londiste3 -q conf/londiste_db1.ini create-root node1 dbname=db1 $ londiste3 -q conf/londiste_db2.ini create-branch node2 dbname=db2 --provider=dbname=db1 $ londiste3 -q conf/londiste_db3.ini create-branch node3 dbname=db3 --provider=dbname=db1 $ londiste3 -q conf/londiste_db4.ini create-branch node4 dbname=db4 --provider=dbname=db2 $ londiste3 -q conf/londiste_db5.ini create-branch node5 dbname=db5 --provider=dbname=db3 ---- Now that schemas are installed, we can start the ticker. ---- $ pgqd -q -d conf/pgqd.ini ---- To see the topology of replication, you can run the status command against any node: ---- $ londiste3 -q conf/londiste_db4.ini status Queue: replika Local node: node4 node1 (root) | Tables: 0/0/0 | Lag: 4s, Tick: 2, NOT UPTODATE +--node3 (branch) | Tables: 0/0/0 | Lag: 12s, Tick: 1, NOT UPTODATE +--node2 (branch) | Tables: 0/0/0 | Lag: 12s, Tick: 1, NOT UPTODATE +--node4 (branch) Tables: 0/0/0 Lag: 11s, Tick: 1, NOT UPTODATE ---- And you need a londiste worker process on each node to actually carry out the actions. ---- $ londiste3 -q -d conf/londiste_db1.ini worker $ londiste3 -q -d conf/londiste_db2.ini worker $ londiste3 -q -d conf/londiste_db3.ini worker $ londiste3 -q -d conf/londiste_db4.ini worker $ londiste3 -q -d conf/londiste_db5.ini worker ---- == Adding tables and data == Now let's play with data. Create table on root node and fill couple of rows. 
---- $ psql -d db1 -c "create table mytable (id serial primary key, data text)" $ psql -d db1 -c "insert into mytable (data) values ('row1')" $ psql -d db1 -c "insert into mytable (data) values ('row2')" $ psql -d db1 -c "insert into mytable (data) values ('row3')" $ psql -d db1 -c "insert into mytable (data) values ('row4')" ---- Create some load on table. ---- $ ./loadgen.py -d conf/gen1.ini ---- Register table on root node. ---- $ londiste3 -q conf/londiste_db1.ini add-table mytable $ londiste3 -q conf/londiste_db1.ini add-seq mytable_id_seq ---- Register table on other node with creation. ---- $ psql -d db2 -c "create sequence mytable_id_seq" CREATE SEQUENCE $ londiste3 -q conf/londiste_db2.ini add-seq mytable_id_seq $ londiste3 -q conf/londiste_db2.ini add-table mytable --create-full $ psql -d db3 -c "create sequence mytable_id_seq" CREATE SEQUENCE $ londiste3 -q conf/londiste_db3.ini add-seq mytable_id_seq $ londiste3 -q conf/londiste_db3.ini add-table mytable --create-full $ psql -d db4 -c "create sequence mytable_id_seq" CREATE SEQUENCE $ londiste3 -q conf/londiste_db4.ini add-seq mytable_id_seq $ londiste3 -q conf/londiste_db4.ini add-table mytable --create-full $ psql -d db5 -c "create sequence mytable_id_seq" CREATE SEQUENCE $ londiste3 -q conf/londiste_db5.ini add-seq mytable_id_seq $ londiste3 -q conf/londiste_db5.ini add-table mytable --create-full ---- == Change topology == The main advantage of skytools3 cascaded replication is how easy it is to change the replication topology. 
---- $ londiste3 -q conf/londiste_db4.ini change-provider --provider=node2 $ londiste3 -q conf/londiste_db4.ini status Queue: replika Local node: node4 node1 (root) | Tables: 1/0/0 | Lag: 1s, Tick: 57 +--node2 (branch) | | Tables: 1/0/0 | | Lag: 1s, Tick: 57 | +--node4 (branch) | Tables: 1/0/0 | Lag: 1s, Tick: 57 +--node3 (branch) | Tables: 1/0/0 | Lag: 1s, Tick: 57 +--node5 (branch) Tables: 1/0/0 Lag: 7s, Tick: 53 ERR: londiste_db5: duplicate key value violates unique constraint "mytable_pkey" ---- Now let's move it to node 3: ---- $ londiste3 -q conf/londiste_db4.ini change-provider --provider=node3 $ londiste3 -q conf/londiste_db4.ini status Queue: replika Local node: node4 node1 (root) | Tables: 1/0/0 | Lag: 0s, Tick: 59 +--node2 (branch) | Tables: 1/0/0 | Lag: 3s, Tick: 59 +--node3 (branch) | Tables: 1/0/0 | Lag: 3s, Tick: 59 +--node4 (branch) | Tables: 1/0/0 | Lag: 3s, Tick: 58 +--node5 (branch) Tables: 1/0/0 Lag: 12s, Tick: 53 ---- ---- $ londiste3 -q conf/londiste_db5.ini change-provider --provider=node2 $ londiste3 -q conf/londiste_db1.ini status Queue: replika Local node: node1 node1 (root) | Tables: 1/0/0 | Lag: 1s, Tick: 64 +--node2 (branch) | | Tables: 1/0/0 | | Lag: 1s, Tick: 64 | +--node5 (branch) | Tables: 1/0/0 | Lag: 1s, Tick: 64 +--node3 (branch) | Tables: 1/0/0 | Lag: 1s, Tick: 64 +--node4 (branch) Tables: 1/0/0 Lag: 1s, Tick: 64 ---- The topology change can also be accomplished from the "other" end, using the `takeover` command: ---- $ londiste3 -q conf/londiste_db3.ini takeover node2 $ londiste3 -q conf/londiste_db2.ini status Queue: replika Local node: node2 node1 (root) | Tables: 1/0/0 | Lag: 0s, Tick: 66 +--node3 (branch) | Tables: 1/0/0 | Lag: 0s, Tick: 66 +--node4 (branch) | Tables: 1/0/0 | Lag: 3s, Tick: 66 +--node2 (branch) | Tables: 1/0/0 | Lag: 0s, Tick: 66 +--node5 (branch) Tables: 1/0/0 Lag: 3s, Tick: 65 ---- The takeover command is in fact the only way to change the root node. 
---- $ londiste3 -q conf/londiste_db2.ini takeover node1 $ londiste3 -q conf/londiste_db2.ini status Queue: replika Local node: node2 node2 (root) | Tables: 1/0/0 | Lag: 1s, Tick: 72 +--node5 (branch) | Tables: 1/0/0 | Lag: 1s, Tick: 72 +--node1 (branch) | Tables: 1/0/0 | Lag: 1s, Tick: 71 +--node3 (branch) | Tables: 1/0/0 | Lag: 3s, Tick: 71 +--node4 (branch) Tables: 1/0/0 Lag: 3s, Tick: 71 ---- That's it! skytools-3.2.6/doc/howto/londiste3_merge_howto.txt0000644000000000000000000001536012426435645017270 0ustar = How To Set Up "Merge" Replication of the Same Table from Multiple Partitions = == Introduction == In this howto we will set up a replication scheme, where data is collected from multiple "partition" databases into a single table on "full" database. This situation is common when using PL/Proxy or other similar partitioning solution for OLTP, but still wanting the data in one table for Data Warehousing. Here we will demonstrate the simplest possible setup with 2 partition databases (`part1` and `part2`) replicating their data into one full database `full1`. === Setting the nodes === ==== Partition databases ==== On both partition databases you need to set up a londiste3 "root" node to replicate from. We will use the the following .ini files for that for database part1 `conf/l3_part1_q_part1.ini`: ---- [londiste3] job_name = l3_part1_q_part1 db = dbname=part1 queue_name = l3_part1_q logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid ---- and for part2 `conf/l3_part2_q_part2.ini`: ---- [londiste3] job_name = l3_part2_q_part2 db = dbname=part2 queue_name = l3_part2_q logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid ---- These ini files are then used for setting up the nodes and adding tables to root node. 
Set up the root nodes on part1 and part2: ---- $ londiste3 -v conf/l3_part1_q_part1.ini create-root part1_root dbname=part1 $ londiste3 -v conf/l3_part2_q_part2.ini create-root part2_root dbname=part2 ---- ==== Full database ==== On the full database, which will hold data from both partitions you need to set up two londiste nodes, one for each of the partition nodes. These will act as the receiving nodes to replicate to. These look very similar and differ only in queue name: File `conf/l3_part1_q_full1.ini`: ---- [londiste3] job_name = l3_part1_q_full1 db = dbname=full1 queue_name = l3_part1_q logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid ---- File `conf/l3_part2_q_full1.ini`: ---- [londiste3] job_name = l3_part2_q_full1 db = dbname=full1 queue_name = l3_part2_q logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid ---- These are first used to set up the leaf nodes: ---- $ londiste3 -v conf/l3_part1_q_full1.ini create-leaf merge_part1_full1 dbname=full1 --provider=dbname=part1 $ londiste3 -v conf/l3_part2_q_full1.ini create-leaf merge_part2_full1 dbname=full1 --provider=dbname=part2 ---- And later also for launching the replication worker daemons. 
But before launching the workers you need to start the pgqd or the "ticker daemon": ---- $ pgqd -v -d conf/pgqd.ini ---- The `conf/pgqd.ini` file for the command above looks like this: ---- [pgqd] database_list = part1,part2,full1 logfile = log/pgqd.log pidfile = pid/pgqd.pid ---- Now that the ticker is running, it's time to launch londiste3 workers which will do the actual replication: ---- $ londiste3 -v -d conf/l3_part1_q_full1.ini worker $ londiste3 -v -d conf/l3_part2_q_full1.ini worker ---- === Setting up the tables === In order to have something to replicate, we need some tables, so let's create them on partition nodes: ---- $ psql -d "part1" -c "create table mydata (id int4 primary key, data text)" $ psql -d "part2" -c "create table mydata (id int4 primary key, data text)" ---- And then add them to set of replicated tables on the root node: ---- $ londiste3 -v conf/l3_part1_q_part1.ini add-table mydata $ londiste3 -v conf/l3_part2_q_part2.ini add-table mydata ---- Now we need some data in these tables, as replicating empty tables is no fun: ---- $ psql -d "part1" -c "insert into mydata values (1, 'part1')" $ psql -d "part2" -c "insert into mydata values (2, 'part2')" ---- We can check that the tables are actually registered for replication in londiste: ---- $ psql -d "full1" -c "select * from londiste.table_info order by queue_name" nr | queue_name | table_name | local | merge_state | custom_snapshot | dropped_ddl | table_attrs | dest_table ----+------------+---------------+-------+-------------+-----------------+-------------+-------------+------------ 1 | l3_part1_q | public.mydata | f | | | | | 2 | l3_part2_q | public.mydata | f | | | | | (2 rows) ---- Now let's subscribe them on full database. As the table is not yet created on full1, we specify `--create` so londiste creates the table on leaf node based on structure that is on root. 
The switch `--merge-all` tells londiste to add the table to all queues which have it on root side, not just the one from the .ini file. ---- $ londiste3 -v conf/l3_part1_q_full1.ini add-table mydata --create --merge-all ---- And yes, there it is, subscribed from both queues: ---- $ psql -d "full1" -c "select * from londiste.table_info order by queue_name" nr | queue_name | table_name | local | merge_state | custom_snapshot | dropped_ddl | table_attrs | dest_table ----+------------+---------------+-------+-------------+-----------------+-------------+-------------+------------ 1 | l3_part1_q | public.mydata | t | | | | | 2 | l3_part2_q | public.mydata | t | | | | | (2 rows) ---- Now we can put more data to partition tables: ---- $ psql -d "part1" -c "insert into mydata values (4 + 1, 'part1')" $ psql -d "part2" -c "insert into mydata values (4 + 2, 'part2')" ---- Wait a few seconds: ---- $ sleep 10 ---- And check that the data has indeed appeared on full database: ---- $ psql -d "full1" -c "select * from mydata order by id" id | data ----+------- 1 | part1 2 | part2 5 | part1 6 | part2 (4 rows) ---- The rows with ids 1 and 2 where replicated during initial copy, the ones with 5 and 6 were captured by triggers into event log on partition database and then replicated to full1 using the standard replication process. 
=== checking subscription === Just to check if we really did achieve what we wanted, we see which tables are present and fully subscribed ('ok'): ---- $ psql -d "full1" -c "select * from londiste.table_info order by queue_name" nr | queue_name | table_name | local | merge_state | custom_snapshot | dropped_ddl | table_attrs | dest_table ----+------------+---------------+-------+-------------+-----------------+-------------+-------------+------------ 1 | l3_part1_q | public.mydata | t | ok | | | | 2 | l3_part2_q | public.mydata | t | ok | | | | (2 rows) ---- Ok, here we have the table public.mydata subscribed from 2 queues and its merge_state is 'ok', meaning the initial copy process has been successfull. That's it, we have successfully set up replication from two partition databases to one single full database. skytools-3.2.6/doc/howto/londiste3_partitioning_howto.txt0000644000000000000000000001315312426435645020676 0ustar = Setting up Londiste3 replication to partitions = == Introduction == This sample shows how to use Londiste `part` handler module to split one big table between two databases. The target databases will have `partconf` schema which is usually used to drive PL/Proxy. Here it is used simply to provide configuration to `part` handler. == Prerequisites == Obviously skytools must be installed but in addition we need pghashlib and pgbench. == Setting up the Root Database == === Create databases === Create root database that will contain all data and two shard databases. Run the following SQL: ---- psql -c "CREATE DATABASE rootdb;" psql -c "CREATE DATABASE sharddb_0;" psql -c "CREATE DATABASE sharddb_1;" ---- Deploy hash function everywhere. This is needed because internal hashtext function was changed between 8.3 and 8.4 versions and may be changed again in future without consideration for its users. 
--- psql rootdb < /usr/share/postgresql/8.4/contrib/hashlib.sql psql sharddb_0 < /usr/share/postgresql/8.4/contrib/hashlib.sql psql sharddb_1 < /usr/share/postgresql/8.4/contrib/hashlib.sql --- === Set up pgbench schema === In this HowTo we are using pgbench for setting up the schema, populating it with sample data and later running SQL loads to be replicated. This command will create pgbench tables and fill them with data: ---- /usr/lib/postgresql/8.4/bin/pgbench -i -s 2 -F 80 rootdb ---- Write partconf.sql that will be deployed to all databases: ---- CREATE SCHEMA partconf; CREATE TABLE partconf.conf ( part_nr integer, max_part integer, db_code bigint, is_primary boolean, max_slot integer, cluster_name text ); CREATE FUNCTION partconf.get_hash_raw ( i_input integer ) RETURNS integer AS $_$ -- used to wrap hashtext so that we can replace it in 8.4 with -- older implementation to keep compatibility select hash_string($1::text, 'lookup2'); $_$ LANGUAGE sql; ---- Populate shard configuration tables. These values are used inside part.py. ---- psql rootdb < partconf.sql psql sharddb_0 < partconf.sql psql sharddb_1 < partconf.sql psql sharddb_0 -c "insert into partconf.conf(part_nr, max_part) values(0,1);" psql sharddb_1 -c "insert into partconf.conf(part_nr, max_part) values(1,1);" ---- Next create configuration files for root node and both partitions. 
st3partsplit/st3_rootdb.ini ---- [londiste3] job_name = st3_rootdb db = dbname=rootdb queue_name = replika logfile = st3partsplit/log/st3_rootdb.log pidfile = st3partsplit/pid/st3_rootdb.pid ---- st3partsplit/st3_sharddb_0.ini ---- [londiste3] job_name = st3_sharddb_0 db = dbname=sharddb_0 queue_name = replika logfile = st3partsplit/log/st3_sharddb_0.log pidfile = st3partsplit/pid/st3_sharddb_0.pid ---- st3partsplit/st3_sharddb_1.ini ---- [londiste3] job_name = st3_sharddb_1 db = dbname=sharddb_1 queue_name = replika logfile = st3partsplit/log/st3_sharddb_1.log pidfile = st3partsplit/pid/st3_sharddb_1.pid ---- Then create root node: ---- londiste3 st3partsplit/st3_rootdb.ini create-root node1 dbname=rootdb ---- And start the worker on root: ---- londiste3 -d st3partsplit/st3_rootdb.ini worker ---- And create leaf nodes and start the workers on partitions: ---- londiste3 st3partsplit/st3_sharddb_0.ini create-leaf node2_0 dbname=sharddb_0 --provider=dbname=rootdb londiste3 -d st3partsplit/st3_sharddb_0.ini worker ---- Second node: ---- londiste3 st3partsplit/st3_sharddb_1.ini create-leaf node2_1 dbname=sharddb_1 --provider=dbname=rootdb londiste3 -d st3partsplit/st3_sharddb_1.ini worker ---- Create config file st3partsplit/pgqd.ini for `pgqd` ("the ticker"): ---- [pgqd] logfile = st3partsplit/log/pgqd.log pidfile = st3partsplit/pid/pgqd.pid ---- Start the ticker process: ---- pgqd -d st3partsplit/pgqd.ini ---- Now add the replicated tables to root and partitions. Here we use `--create` switch to add them to partition, which means Londiste takes schema from root node and creates tables on target nodes automatically. The `--handler=part` tells londiste to use the `part` handler for replication, the `--handler-arg=key=*id` specifies which key field to partition on. 
Run command the following commands : ---- londiste3 st3partsplit/st3_rootdb.ini add-table pgbench_accounts --handler=part --handler-arg=key=aid londiste3 st3partsplit/st3_sharddb_0.ini add-table pgbench_accounts --create --handler=part --handler-arg=key=aid londiste3 st3partsplit/st3_sharddb_1.ini add-table pgbench_accounts --create --handler=part --handler-arg=key=aid londiste3 st3partsplit/st3_rootdb.ini add-table pgbench_branches --handler=part --handler-arg=key=bid londiste3 st3partsplit/st3_sharddb_0.ini add-table pgbench_branches --create --handler=part --handler-arg=key=bid londiste3 st3partsplit/st3_sharddb_1.ini add-table pgbench_branches --create --handler=part --handler-arg=key=bid londiste3 st3partsplit/st3_rootdb.ini add-table pgbench_tellers --handler=part --handler-arg=key=tid londiste3 st3partsplit/st3_sharddb_0.ini add-table pgbench_tellers --create --handler=part --handler-arg=key=tid londiste3 st3partsplit/st3_sharddb_1.ini add-table pgbench_tellers --create --handler=part --handler-arg=key=tid ---- The following command will run pgbench full speed with 5 parallel database connections for 10 seconds. ---- /usr/lib/postgresql/8.4/bin/pgbench -T 10 -c 5 rootdb ---- After this is done, you can check that the tables on both sides have the same data: ---- londiste3 st3partsplit/st3_sharddb_0.ini compare londiste3 st3partsplit/st3_sharddb_0.ini compare ---- Except of course that they don't - each partition will only have roughly half the data from the root. But the row counts and checksums of the partitions should both add up to the numbers on the master. skytools-3.2.6/doc/queue_mover3.txt0000644000000000000000000000440212426435645014237 0ustar = queue_mover3(1) = == NAME == queue_mover3 - PgQ consumer that copies data from one queue to another. == SYNOPSIS == queue_mover3 [switches] config.ini == DESCRIPTION == queue_mover is PgQ consumer that transports events from source queue into target queue. 
One use case is when events are produced in several databases then queue_mover is used to consolidate these events into a single queue that can then be processed by consumers who need to handle these events. For example in case of partitioned databases it's convenient to move events from each partition into one central queue database and then process them there. That way configuration and dependencies of partition databases are simpler and more robust. Another use case is to move events from OLTP database to batch processing server. Transactionality: events will be inserted as one transaction on target side. That means only batch_id needs to be tracked on target side. == QUICK-START == Basic PgQ setup and usage can be summarized by the following steps: 1. PgQ must be installed both in source and target databases. See pgqadm man page for details. 2. Target database must also have pgq_ext schema installed. It is used to keep sync between two databases. 3. Create a queue_mover configuration file, say qmover_sourceq_to_targetdb.ini 4. create source and target queues $ pgqadm.py sourcedb_ticker.ini create $ pgqadm.py targetdb_ticker.ini create 5. launch queue mover in daemon mode $ queue_mover3 -d qmover_sourceq_to_targetdb.ini 6. start producing and consuming events == CONFIG == include::common.config.txt[] === queue_mover parameters === src_db:: Source database. dst_db:: Target database. dst_queue_name:: Target queue name. === Example config file === [queue_mover3] job_name = eventlog_to_target_mover src_db = dbname=sourcedb dst_db = dbname=targetdb pgq_queue_name = eventlog dst_queue_name = copy_of_eventlog pidfile = pid/%(job_name)s.pid logfile = log/%(job_name)s.log == COMMAND LINE SWITCHES == include::common.switches.txt[] == BUGS == Event ID is not kept on target side. If needed it can be kept; then the event_id sequence at the target side needs to be increased by hand to inform ticker about new events. 
skytools-3.2.6/doc/common.switches.txt0000644000000000000000000000114512426435645014741 0ustar Following switches are common to all skytools.DBScript-based Python programs. -h, --help:: show help message and exit -q, --quiet:: make program silent -v, --verbose:: make program more verbose -d, --daemon:: make program go background --ini:: show commented template config file. Following switches are used to control already running process. The pidfile is read from config then signal is sent to process id specified there. -r, --reload:: reload config (send SIGHUP) -s, --stop:: stop program safely (send SIGINT) -k, --kill:: kill program immidiately (send SIGTERM) skytools-3.2.6/doc/pgq-sql.txt0000644000000000000000000001021112426435645013177 0ustar = PgQ - queue for PostgreSQL = == Queue creation == pgq.create_queue(queue_name text) Initialize event queue. Returns 0 if event queue already exists, 1 otherwise. == Producer == pgq.insert_event(queue_name text, ev_type, ev_data) pgq.insert_event(queue_name text, ev_type, ev_data, extra1, extra2, extra3, extra4) Generate new event. This should be called inside main tx - thus rollbacked with it if needed. == Consumer == pgq.register_consumer(queue_name text, consumer_id text) Attaches this consumer to particular event queue. Returns 0 if the consumer was already attached, 1 otherwise. pgq.unregister_consumer(queue_name text, consumer_id text) Unregister and drop resources allocated to customer. pgq.next_batch(queue_name text, consumer_id text) Allocates next batch of events to consumer. Returns batch id (int8), to be used in processing functions. If no batches are available, returns NULL. That means that the ticker has not cut them yet. This is the appropriate moment for consumer to sleep. pgq.get_batch_events(batch_id int8) `pgq.get_batch_events()` returns a set of events in this batch. There may be no events in the batch. This is normal. The batch must still be closed with pgq.finish_batch(). 
Event fields: (ev_id int8, ev_time timestamptz, ev_txid int8, ev_retry int4, ev_type text, ev_data text, ev_extra1, ev_extra2, ev_extra3, ev_extra4) pgq.event_failed(batch_id int8, event_id int8, reason text) Tag event as 'failed' - it will be stored, but not further processing is done. pgq.event_retry(batch_id int8, event_id int8, retry_seconds int4) Tag event for 'retry' - after x seconds the event will be re-inserted into main queue. pgq.finish_batch(batch_id int8) Tag batch as finished. Until this is not done, the consumer will get same batch again. After calling finish_batch consumer cannot do any operations with events of that batch. All operations must be done before. == Failed queue operation == Events tagged as failed just stay on their queue. Following functions can be used to manage them. pgq.failed_event_list(queue_name, consumer) pgq.failed_event_list(queue_name, consumer, cnt, offset) pgq.failed_event_count(queue_name, consumer) Get info about the queue. Event fields are same as for pgq.get_batch_events() pgq.failed_event_delete(queue_name, consumer, event_id) pgq.failed_event_retry(queue_name, consumer, event_id) Remove an event from queue, or retry it. == Info operations == pgq.get_queue_info() Get list of queues. Result: (queue_name, queue_ntables, queue_cur_table, queue_rotation_period, queue_switch_time, queue_external_ticker, queue_ticker_max_count, queue_ticker_max_lag, queue_ticker_idle_period, ticker_lag) pgq.get_consumer_info() pgq.get_consumer_info(queue_name) pgq.get_consumer_info(queue_name, consumer) Get list of active consumers. Result: (queue_name, consumer_name, lag, last_seen, last_tick, current_batch, next_tick) pgq.get_batch_info(batch_id) Get info about batch. Result fields: (queue_name, consumer_name, batch_start, batch_end, prev_tick_id, tick_id, lag) == Notes == Consumer *must* be able to process same event several times. 
== Example == First, create event queue: select pgq.create_queue('LogEvent'); Then, producer side can do whenever it wishes: select pgq.insert_event('LogEvent', 'data', 'DataFor123'); First step for consumer is to register: select pgq.register_consumer('LogEvent', 'TestConsumer'); Then it can enter into consuming loop: begin; select pgq.next_batch('LogEvent', 'TestConsumer'); [into batch_id] commit; That will reserve a batch of events for this consumer. To see the events in batch: select * from pgq.get_batch_events(batch_id); That will give all events in batch. The processing does not need to be happen all in one transaction, framework can split the work how it wants. If a events failed or needs to be tried again, framework can call: select pgq.event_retry(batch_id, event_id, 60); select pgq.event_failed(batch_id, event_id, 'Record deleted'); When all done, notify database about it: select pgq.finish_batch(batch_id) skytools-3.2.6/doc/Makefile0000644000000000000000000001057112426435645012523 0ustar include ../config.mak web = mkz@pgf:/home/pgfoundry.org/groups/skytools/htdocs/skytools-3.0 EPYDOC = epydoc EPYARGS = --no-private --url="http://pgfoundry.org/projects/skytools/" \ --name="Skytools" --html --no-private -v TOPHTML = README.html INSTALL.html index.html DOCHTML = \ TODO.html pgq-sql.html pgq-nodupes.html \ faq.html set.notes.html skytools3.html devnotes.html pgqd.html \ londiste3.html walmgr3.html qadmin.html scriptmgr.html \ sql-grants.html \ skytools_upgrade.html queue_mover3.html queue_splitter3.html \ simple_consumer3.html simple_local_consumer3.html \ howto/londiste3_cascaded_rep_howto.html \ howto/londiste3_merge_howto.html \ howto/londiste3_partitioning_howto.html \ howto/londiste3_simple_rep_howto.html \ howto/setup_walmgr_replication.html MAN5 = MAN1_SFX = scriptmgr.1 skytools_upgrade.1 MAN1 = qadmin.1 pgqd.1 walmgr3.1 londiste3.1 simple_consumer3.1 simple_local_consumer3.1 \ queue_mover3.1 queue_splitter3.1 FQHTML = $(addprefix html/doc/, 
$(DOCHTML)) $(addprefix html/, $(TOPHTML)) FQMAN1 = $(addprefix man/, $(MAN1)) FQMAN1_SFX = $(addprefix man/, $(MAN1_SFX)) FQMAN5 = $(addprefix man/, $(MAN5)) FQMAN = $(FQMAN1) $(FQMAN1_SFX) $(FQMAN5) COMMON = common.switches.txt common.config.txt GETATTRS = $(PYTHON) ../misc/getattrs.py FIXMAN = $(PYTHON) ../misc/fixman.py #AFLAGS = -a linkcss #AFLAGS = -a stylesheet=extra.css all: $(FQMAN) man: $(FQMAN) html: $(FQHTML) install: $(FQMAN) mkdir -p $(DESTDIR)/$(mandir)/man1 mkdir -p $(DESTDIR)/$(mandir)/man5 mkdir -p $(DESTDIR)/$(docdir) for m in $(FQMAN1_SFX); do \ xf="`basename $$m | sed 's/[.]/$(SUFFIX)./'`"; \ install -m 644 $$m $(DESTDIR)/$(mandir)/man1/$$xf || exit 1; \ done for m in $(FQMAN1); do \ install -m 644 $$m $(DESTDIR)/$(mandir)/man1 || exit 1; \ done for m in $(FQMAN5); do \ install -m 644 $$m $(DESTDIR)/$(mandir)/man5 || exit 1; \ done htmlinstall: $(FQHTML) for h in $(FQHTML); do \ install -m 644 $$h $(DESTDIR)/$(docdir) || exit 1; \ done PY_PKGS = skytools pgq londiste # skytools.config skytools.dbstruct skytools.gzlog \ # skytools.quoting skytools.scripting skytools.sqltools \ # pgq pgq.consumer pgq.event pgq.maint pgq.producer pgq.status pgq.ticker \ # londiste londiste.compare londiste.file_read londiste.file_write \ # londiste.installer londiste.playback londiste.repair londiste.setup \ # londiste.syncer londiste.table_copy apidoc: rm -rf html/api mkdir -p html/api cd ../python && $(EPYDOC) $(EPYARGS) -o ../doc/html/api $(PY_PKGS) cd ../sql/pgq && rm -rf docs/html && $(MAKE) dox && cp -rp docs/html ../../doc/html/pgq cd ../sql/pgq_coop && rm -rf docs/html && $(MAKE) dox && cp -rp docs/html ../../doc/html/pgq_coop cd ../sql/pgq_node && rm -rf docs/html && $(MAKE) dox && cp -rp docs/html ../../doc/html/pgq_node cd ../sql/londiste && rm -rf docs/html && $(MAKE) dox && cp -rp docs/html ../../doc/html/londiste cd ../sql/pgq_ext && rm -rf docs/html && $(MAKE) dox && cp -rp docs/html ../../doc/html/pgq_ext apiupload: apidoc -rsync -rtlz api/* 
$(web)/api -rsync -rtlz ../sql/pgq/docs/html/* $(web)/pgq/ -rsync -rtlz ../sql/pgq_coop/docs/html/* $(web)/pgq_coop/ -rsync -rtlz ../sql/pgq_node/docs/html/* $(web)/pgq_node/ -rsync -rtlz ../sql/londiste/docs/html/* $(web)/londiste/ clean: rm -rf html *.xml api distclean: clean rm -rf ../sql/pgq/docs/pgq realclean: distclean rm -rf man *.xml *.html ifneq ($(ASCIIDOC),no) ifneq ($(XMLTO),no) man/%.xml: %.txt $(COMMON) @mkdir -p man $(ASCIIDOC) -b docbook -d manpage `$(GETATTRS) $<` -o $@ $< man/%.1: man/%.xml @mkdir -p man $(XMLTO) -o man man $< endif html/doc/%.html: %.txt $(COMMON) @mkdir -p $(dir $@) LANG=C cat $< \ | sed -e '/^include/b' -e 's,\([A-Za-z.0-9]*\)[.]txt,link:\1.html[],g' \ | $(ASCIIDOC) $(AFLAGS) -a toc `$(GETATTRS) $<` -o - - \ | sed -e "/^[/][*] Workarounds/r ../misc/extra.css" \ > $@ html/README.html: ../README @mkdir -p $(dir $@) cat $< \ | sed -e 's,doc/\([!-~]*\)[.]txt,link:\1.html[],g' \ -e 's,http:[!-~]*,&[],g' \ | $(ASCIIDOC) $(AFLAGS) -a toc -o - - \ | sed -e "/^[/][*] Workarounds/r ../misc/extra.css" \ > $@ html/INSTALL.html: ../INSTALL @mkdir -p $(dir $@) $(ASCIIDOC) $(AFLAGS) -o - $< \ | sed -e "/^[/][*] Workarounds/r ../misc/extra.css" \ > $@ html/index.html: index.txt @mkdir -p $(dir $@) $(ASCIIDOC) $(AFLAGS) -o - $< \ | sed -e "/^[/][*] Workarounds/r ../misc/extra.css" \ > $@ endif web: $(FQHTMLS) rsync -avz html/* $(web)/ skytools-3.2.6/doc/simple_local_consumer3.txt0000644000000000000000000000110212426435645016253 0ustar = simple_local_consumer3(1) = == NAME == simple_local_consumer3 - PgQ consumer that executes query for each row == SYNOPSIS == simple_local_consumer3.py [switches] config.ini == DESCRIPTION == For each event in batch it will execute query, filling event values into it. Transactionality: query is executed in autocommit mode, completed batch is tracked in local file. It can be switched between nodes in cascaded queue. == CONFIG == Run `simple_local_consumer3 --ini` to see commented config template. 
== COMMAND LINE SWITCHES == include::common.switches.txt[] skytools-3.2.6/doc/index.txt0000644000000000000000000000332512426435645012732 0ustar = Skytools 3 Documentation = == Overview == * link:README.html[Package overview] * link:INSTALL.html[Installation help] * link:doc/faq.html[FAQ] * link:doc/skytools3.html[Skytools 3.0 Release Notes] == HOWTOs == * link:doc/howto/londiste3_simple_rep_howto.html[Setting up simple replication with 2 nodes] * link:doc/howto/londiste3_cascaded_rep_howto.html[Setting up cascaded replication with 5 nodes] * link:doc/howto/londiste3_merge_howto.html[Merging 2 partitions to one big database] * link:doc/howto/londiste3_partitioning_howto.html[Replicating from one master to 2 partitions] * link:doc/howto/setup_walmgr_replication.html[Setting up walmgr] == Manpages == * link:doc/londiste3.html[londiste3] - Londiste command line interface. * link:doc/qadmin.html[qadmin] - psql-like console for managing queues * link:doc/queue_mover3.html[queue_mover3] - copy queue to another database * link:doc/queue_splitter3.html[queue_splitter3] - split queue into different queues * link:doc/scriptmgr.html[scriptmgr] - bulk start/stopping of skytools scripts * link:doc/skytools_upgrade.html[skytools_upgrade] - Update database modules * link:doc/walmgr3.html[walmgr3] - tool for managing WAL-base replication == API docs == * Python link:api/[API] documentation * SQL API documentation: - link:pgq[]: SQL module for generic queue - link:pgq_coop[]: SQL module for sharing workload - link:pgq_ext[]: batch tracking in target database - link:pgq_node[]: cascaded queueing - link:londiste[]: Londiste state == Internal == * link:doc/devnotes.html[Notes for contributors] * link:doc/set.notes.html[Technical notes for cascading] * link:doc/TODO.html[TODO list] // == Old docs == // * link:doc/pgq-nodupes.html[] // * link:doc/pgq-sql.html[] skytools-3.2.6/doc/TODO.txt0000644000000000000000000000635212426435645012373 0ustar = Skytools ToDo list = Gut feeling about 
priorities: High:: Needed soon. Medium:: Good if done, but can be postponed. Low:: Interesting idea, but OK if not done. == Medium Priority == * tests: takeover testing - wal behind - wal ahead - branch behind * londiste takeover: check if all tables exist and are in sync. Inform user. Should the takeover stop if problems? How can such state be checked on-the-fly? Perhaps `londiste missing` should show in-copy tables. * cascade takeover: wal failover queue sync. WAL-failover can be behind/ahead from regular replication with partial batch. Need to look up-batched events in wal-slave and full-batches on branches and sync them together. this should also make non-wal branch takeover with branch thats behind the others work - it needs to catch up with recent events. . Load top-ticks from branches . Load top-tick from new master, if ahead from branches all ok . Load non-batched events from queue (ev_txid not in tick_snapshot) . Load partial batch from branch . Replay events that do not exists . Replay rest of batches fully . Promote to root * tests for things that have not their own regtests or are not tested enough during other tests: - pgq.RemoteConsumer - pgq.CoopConsumer - skytools.DBStruct - londiste handlers * londiste add-table: automatic serial handling, --noserial switch? Currently, `--create-full` does not create sequence on target, even if source table was created with `serial` column. It does associate column with sequence if that exists, but it requires that it was created previously. * pgqd: rip out compat code for pre-pgq.maint_operations() schemas. All the maintenance logic is in DB now. * qadmin: merge cascade commands (medium) - may need api redesign to avoid duplicating pgq.cascade code? * londiste replay: when buffering queries, check their size. Current buffering is by count - flushed if 200 events have been collected. That does not take account that some rows can be very large. 
So separate counter for len(ev_data) needs to be added, that flushes if buffer would go over some specified amount of memory. == Low Priority == * dbscript: switch (-q) for silence for cron/init scripts. Dunno if we can override loggers loaded from skylog.ini. Simply redirecting fds 0,1,2 to /dev/null should be enough then. * londiste: support creating slave from master by pg_dump / PITR. Take full dump from root or failover-branch and turn it into another branch. . Rename node . Check for correct epoch, fix if possible (only for pg_dump) . Sync batches (wal-failover should have it) * londiste copy: async conn-to-conn copy loop in Python/PythonC. Currently we simply pipe one copy_to() to another copy_from() in blocked manner with large buffer, but that likely halves the potential throughput. * qadmin: multi-line commands. The problem is whether we can use python's readline in a psql-like way. * qadmin: recursive parser. Current non-recursive parser cannot express complex grammar (SQL). If we want SQL auto-completion, recursive grammar is needed. This would also simplify current grammar. 1. On rule reference, push state to stack 2. On rule end, pop state from stack. If empty then done. skytools-3.2.6/doc/pgq-nodupes.txt0000644000000000000000000000266312426435645014071 0ustar = Avoiding duplicate events = It is pretty burdensome to check if event is already processed, especially on bulk data moving. Here's a way how this can be avoided. First, consumer must guarantee that it processes all events in one tx. Consumer itself can tag events for retry, but then it must be able to handle them later. == Only one db == If the PgQ queue and event data handling happen in same database, the consumer must simply call pgq.finish_batch() inside the event-processing transaction. == Several databases == If the event processing happens in different database, the consumer must store the batch_id into destination database, inside the same transaction as the event processing happens. 
- Only after committing it, consumer can call pgq.finish_batch() in queue database and commit that. - As the batches come in sequence, there's no need to remember full log of batch_id's, it's enough to keep the latest batch_id. - Then at the start of every batch, consumer can check if the batch_id already exists in destination database, and if it does, then just tag batch done, without processing. With this, there's no need for consumer to check for already processed events. == Note == This assumes the event processing is transaction-able - failures will be rollbacked. If event processing includes communication with world outside database, eg. sending email, such handling won't work. skytools-3.2.6/doc/walmgr3.txt0000644000000000000000000003462712426435645013210 0ustar = walmgr3(1) = == NAME == walmgr3 - tool for managing WAL-based replication for PostgreSQL. == SYNOPSIS == walmgr3 command [--not-really] [options] == DESCRIPTION == Walmgr3 is a tool to handle replication of PostgreSQL with PITR (also known as Log Shipping). This script allows to setup and administer the replication, it is also used by PostgreSQL to archive and restore the WAL files. See <> below to start your first log shipping in few steps. The 'command' is one of the Master, Slave, Common or Internal commands listed below. == GENERAL OPTIONS == -V, --version:: Print version info and exit. -h, --help:: Show this help message and exit. -q, --quiet:: Log only errors and warnings. -v, --verbose:: Log verbosely. -d, --daemon:: Run in daemon mode (go background). -n, --not-really:: Show what would be done without actually doing anything. == SPECIFIC OPTIONS == --ini:: Display sample ini file. --set="'param=val[,param=value]'":: Override config setting --init-master:: Initialize Master walmgr3 configuration. --init-slave:: Initialize Slave walmgr3 configuration. --config-dir='filepath':: Configuration file location for `--init-X commands`. --slave='hostname':: Slave host name. 
--pgdata='path':: PostgreSQL data directory. --ssh-keygen:: Generate a SSH key pair if needed (used in Master). --ssh-add-key='keyfile.pub':: Add the public key file to authorized_hosts file (used in Slave). --ssh-remove-key='ssh_key':: Remove Master ssh key from authorized_hosts file (used in Slave). --primary-conninfo='connection_string':: Provide the connection string to the streaming replication Master (used in Slave). --add-password='plaintext_file':: Add password for streaming replication from plain text file to .pgpass. Additional fields for password file entry will be extracted from primary-conninfo (used in Slave). --remove-password:: Remove .pgpass entry, which was used for streaming replication (used in Slave) --synch-standby='synchronous_standby_names':: Do the same thing as command synch-standby, but walmgr ini file is not used. This option can be used when walmgr ini is not available. It tries to guess the postgres config location, --pgdata option may also be needed. (used in Master) == DAEMON OPTIONS == -r, --reload:: Reload config (send SIGHUP). -s, --stop:: Stop program safely (send SIGINT). -k, --kill:: Kill program immediately (send SIGTERM). == MASTER COMMANDS == === setup === Sets up PostgreSQL for WAL archiving, creates necessary directory structures on Slave. === sync === Copies in-progress WAL files to Slave. === syncdaemon === Start WAL synchronization in daemon mode. This will start periodically synching the in-progress WAL files to Slave. The following configuration parameters are used to drive the syncdaemon: - *loop_delay* - how long to sleep between the synchs. - *use_xlog_functions* - use record based shipping to synchronize in-progress WAL segments. === stop === Stop archiving and de-configure PostgreSQL archiving. === periodic === Runs periodic command if configured. This enables to execute arbitrary commands on interval, useful for synchronizing scripts, config files, crontabs etc. 
=== synch-standby === Enables/disables synchronous streaming replication for given application name(s). Does additional check before enabling synchronous mode. == SLAVE COMMANDS == === boot === Stop WAL playback and bring the database up so it can accept queries. === pause === Pauses WAL playback. === continue === Continues previously paused WAL playback. === createslave === Creates backup from Master database using streaming replication. Also creates recovery.conf and starts slave standby. Backup is created with pg_basebackup and pg_receivexlog (available in 9.2 and up). == COMMON COMMANDS == === listbackups === Lists available backups on Slave node. === backup === Creates a new base backup from Master database. Will purge expired backups and WAL files on Slave if `keep_backups` is not specified. During a backup a lock file is created in Slave `completed_wals` directory. This is to prevent simultaneous backups and resulting corruption. If running backup is terminated, the BACKUPLOCK file may have to be removed manually. EXPERIMENTAL: If run on Slave, creates backup from in-recovery Slave data. WAL playback is paused, Slave data directory is backed up to `full_backup` directory and WAL playback is resumed. Backups are rotated as needed. The idea is to move the backup load away from production node. Usable from PostgreSQL 8.2 and up. === restore [src[dst]] === Restores the specified backup set to target directory. If specified without arguments the latest backup is *moved* to Slave data directory (doesn't obey retention rules). If `src` backup is specified the backup is copied (instead of moved). Alternative destination directory can be specified with `dst`. === cleanup === Cleanup any walmgr3 files after stop. == INTERNAL COMMANDS == === xarchive === On Master, archive one WAL file. === xrestore [last restartpoint wal] === On Slave, restore one WAL file. === xlock === On Master, create lock file to deny other concurrent backups. 
=== xrelease === On Slave, remove backup lock file, allow other backup to run. === xrotate === Rotate backups by increasing backup directory suffixes. Note that since we also have to make room for next backup, we actually have *keep_backups - 1* backups available after this. Unneeded WAL files are not removed here, it is handled by `xpurgewals` command instead. === xpurgewals === On Slave, remove WAL files not needed for recovery. === xpartialsync === Read 'bytes' worth of data from stdin, append to the partial WAl file starting from 'offset'. On error it is assumed that master restarts from zero. The resulting file is always padded to XLOG_SEGMENT_SIZE bytes to simplify recovery. == CONFIGURATION == === Common settings === ==== job_name ==== Optional. Indentifies this script, used in logging. Keep unique if using central logging. ==== logfile ==== Where to log. ==== use_skylog ==== Optional. If nonzero, 'skylog.ini' is used for log configuration. === Master settings === ==== pidfile ==== Pid file location for syncdaemon mode (if running with -d). Otherwise not required. ==== master_db ==== Database to connect to for pg_start_backup(), etc. It is not a good idea to use `dbname=template` if running syncdaemon in record shipping mode. ==== master_data ==== Master data directory location. ==== master_config ==== Master postgresql.conf file location. This is where `archive_command` gets updated. ==== master_restart_cmd ==== The command to restart Master database, this used after changing `archive_mode` parameter. Leave unset if you cannot afford to restart the database at setup/stop. ==== slave ==== Slave host and base directory. ==== slave_config ==== Configuration file location for the Slave walmgr3. ==== completed_wals ==== Slave directory where archived WAL files are copied. ==== partial_wals ==== Slave directory where incomplete WAL files are stored. ==== full_backup ==== Slave directory where full backups are stored. ==== config_backup ==== Optional. 
Slave directory where configuration file backups are stored. ==== loop_delay ==== The frequency of syncdaemon updates. In record shipping mode only incremental updates are sent, so smaller interval can be used. ==== use_xlog_functions ==== Use pg_xlog functions for record based shipping (available in 8.2 and up). ==== compression ==== If nonzero, a `-z` flag is added to rsync cmdline. It reduces network traffic at the cost of extra CPU time. ==== keep_symlinks ==== Keep symlinks for `pg_xlog` and `pg_log`. ==== hot_standby ==== If set to 1, walmgr3 setup will set `wal_level` to `hot_standby` (PostgreSQL 9.0 and newer). ==== command_interval ==== How ofter to run periodic command script. In seconds, and only evaluated at log switch times. ==== periodic_command ==== Shell script to be executed at specified time interval. Can be used for synchronizing scripts, config files etc. === Sample master.ini === [walmgr] job_name = wal-master logfile = ~/log/%(job_name)s.log pidfile = ~/pid/%(job_name)s.pid use_skylog = 1 master_db = dbname=my_db master_data = /var/lib/postgresql/9.1/main master_config = /etc/postgresql/9.1/main/postgresql.conf master_bin = /usr/lib/postgresql/9.1/bin # set this only if you can afford database restarts during setup and stop. 
#master_restart_cmd = /etc/init.d/postgresql-9.1 restart slave = slave-host slave_config = /var/lib/postgresql/conf/wal-slave.ini walmgr_data = /var/lib/postgresql/walshipping completed_wals = %(walmgr_data)s/logs.complete partial_wals = %(walmgr_data)s/logs.partial full_backup = %(walmgr_data)s/data.master config_backup = %(walmgr_data)s/config.backup # syncdaemon update frequency loop_delay = 10.0 # use record based shipping available since 8.2 use_xlog_functions = 0 # pass -z to rsync, useful on low bandwidth links compression = 0 # keep symlinks for pg_xlog and pg_log keep_symlinks = 1 # tell walmgr to set wal_level to hot_standby during setup #hot_standby = 1 # periodic sync #command_interval = 600 #periodic_command = /var/lib/postgresql/walshipping/periodic.sh === Slave settings === ==== slave_data ==== PostgreSQL data directory for the Slave. This is where the restored backup is copied/moved. ==== slave_bin ==== Specifies the location of PostgreSQL binaries (pg_controldata, etc). Needed if they are not already in the PATH. ==== slave_stop_cmd ==== Script to stop PostgreSQL on Slave. ==== slave_start_cmd ==== Script to start PostgreSQL on Slave. ==== slave_config_dir ==== Directory for PostgreSQL configuration files. If specified, `walmgr3 restore` attempts to restore configuration files from `config_backup` directory. ==== slave_pg_xlog ==== Set to the directory on the Slave where pg_xlog files get written to. On a restore to the Slave walmgr3 will create a symbolic link from data/pg_xlog to this location. ==== completed_wals ==== Directory where complete WAL files are stored. Also miscellaneous control files are created in this directory (BACKUPLOCK, STOP, PAUSE, etc.). ==== partial_wals ==== Directory where partial WAL files are stored. ==== full_backup ==== Directory where full backups are stored. ==== config_backup ==== Optional. Slave directory where configuration file backups are stored. 
==== backup_datadir ==== Set `backup_datadir` to 'no' to prevent walmgr3 from making a backup of the data directory when restoring to the Slave. This defaults to 'yes'. ==== keep_backups ==== Number of backups to keep. Also all WAL files needed to bring earliest backup up to date are kept. The backups are rotated before new backup is started, so at one point there is actually one less backup available. It probably doesn't make sense to specify `keep_backups` if periodic backups are not performed - the WAL files will pile up quickly. Backups will be named data.master, data.master.0, data.master.1 etc. ==== archive_command ==== Script to execute before rotating away the oldest backup. If it fails backups will not be rotated. ==== primary_conninfo ==== Primary database connection string for hot standby - enabling this will cause the Slave to be started in hot standby mode. === Sample slave.ini === [walmgr] job_name = wal-slave logfile = ~/log/%(job_name)s.log use_skylog = 1 slave_data = /var/lib/postgresql/9.1/main slave_bin = /usr/lib/postgresql/9.1/bin slave_stop_cmd = /etc/init.d/postgresql-9.1 stop slave_start_cmd = /etc/init.d/postgresql-9.1 start slave_config_dir = /etc/postgresql/9.1/main # alternative pg_xlog directory for slave, symlinked to pg_xlog on restore #slave_pg_xlog = /vol2/pg_xlog walmgr_data = ~/walshipping completed_wals = %(walmgr_data)s/logs.complete partial_wals = %(walmgr_data)s/logs.partial full_backup = %(walmgr_data)s/data.master config_backup = %(walmgr_data)s/config.backup backup_datadir = yes keep_backups = 0 archive_command = # primary database connect string for hot standby -- enabling # this will cause the slave to be started in hot standby mode. #primary_conninfo = host=master port=5432 user=postgres == EXIT STATUS == 0:: Successful program execution. == ENVIRONMENT == PostgreSQL environment variables can be used. == QUICK START [[quick_start]] == 1. 
Set up passwordless ssh-key on Master and write configuration file master$ walmgr3 --ssh-keygen --init-master --slave 2. Set up passwordless ssh authentication from Master to Slave and write configuration file on Slave slave$ walmgr3 --init-slave --ssh-add-key=/tmp/id_rsa.pub 3. Logging setup on Master and Slave master$ cp skylog.ini ~postgres/ slave$ cp skylog.ini ~postgres/ 4. Start archival process and create a base backup master$ walmgr3 /var/lib/postgresql/conf/wal-master.ini setup master$ walmgr3 /var/lib/postgresql/conf/wal-master.ini backup CAUTION: starting from PostgreSQL 8.3 the archiving is enabled by setting archive_mode GUC to on. However changing this parameter requires the server to be restarted. 5. Prepare postgresql.conf and pg_hba.conf on Slave and start replay slave$ walmgr3 /var/lib/postgresql/conf/wal-slave.ini restore For debian based distributions the standard configuration files are located in /etc/postgresql/x.x/main directory. If another scheme is used the postgresql.conf and pg_hba.conf should be copied to slave full_backup directory. Make sure to disable archive_command in slave config. 'walmgr3 restore' moves data in place, creates recovery.conf and starts postmaster in recovery mode. 6. In-progress WAL segments can be backup by command: master$ walmgr3 /var/lib/postgresql/conf/wal-master.ini sync 7. If need to stop replay on Slave and boot into normal mode, do: slave$ walmgr3 /var/lib/postgresql/conf/wal-slave.ini boot skytools-3.2.6/doc/qadmin.txt0000644000000000000000000001443612426435645013101 0ustar = qadmin(1) = == NAME == qadmin - Easy to use admin console to examine and administer PgQ queues. == SYNOPSIS == qadmin [options] == DESCRIPTION == This is a psql-like console for queue administration. The console offers a large number of commands to setup, control and manage PgQ queueing system. It also offers a non-interactive mode to run one or more commands. 
qadmin keeps its history file in the home of the user (`~/.qadmin_history`). == GENERAL OPTIONS == --help:: Help screen. --version:: Print version. == CONNECTION OPTIONS == -h host:: Specify host to connect to (default: localhost via unix socket). -p port:: Specify port. -U user:: Specify user name. -d dbname:: Database name. -Q queuename:: Queue name, it is used as the `default queue` if it is provided. == SPECIFIC OPTIONS == -c 'cmd_string':: Execute console command. -f 'execfile':: Execute file containing console commands. == CONNECTION COMMANDS == qadmin offers to connect to other databases or queues from the console itself. === connect [queue=]; === Connect the console with the specified connection string, optional parameter to set the default queue on connection. === connect [queue=] [node=]; === Connect the console to the specified queue and/or node. == MANAGEMENT COMMANDS == === install pgq | londiste; === Install PgQ or Londiste to the connected database. === create queue ; === Create the specified queue. === alter queue set param =,; === Set one or more parameters on one or all queues at once. === drop queue ; === Drop the named queue. === register consumer [on | at | copy ]; === Register a consumer on a queue, or at a specified tick or based on another consumer. === unregister consumer [from ]; === Unregister one or all consumers, if the console is not connected to a queue, its name must be provided. === register subconsumer for [on ]; === Register a subconsumer to a consumer, if the console is not connected to a queue, its name must be provided. === unregister subconsumer for [from ] [close [batch]]; === Unregister one or all subconsumers from a consumer, if the console is not connected to a queue, its name must be provided. Current batch can be aborted if the `close batch` subcommand is provided. == SHOW COMMANDS == === show help; === Show all the console commands. === show queue [ ]; === Show details of one or all queues. 
=== show table ; === Show DDL for the specified table. === show sequence ; === Show DDL for the specified sequence. === show consumer [ [on ] ]; === Show details of one or all consumers on one or all queues. === show node [ [on ] ]; === Show details of one or all nodes on one or all queues. === show batch ; === Show details of the batch, default queue must be set (see `connect queue`) === show batch ; === Show details of the current batch for the specified consumer, default queue must be set (see `connect queue`) == LONDISTE COMMANDS == All these commands are applied on the node where the console is connected to. === londiste add table [with ... ] === with no_triggers:: Skip trigger creation. with skip_truncate:: Does not truncate the table on the destination. with expect_sync:: Set table state to 'ok'. with tgflags='IUDBAQLS':: Trigger creation flags, see below for details. with backup:: Put urlencoded contents of old row to `ev_extra2`. with skip:: Create skip trigger. Same as S flag. with when='expr':: If 'expr' returns false, do not insert event. with ev_XX='EXPR':: Overwrite default ev_* columns (see below). Trigger creation flags (default: AIUDL): - I - ON INSERT - U - ON UPDATE - D - ON DELETE - Q - use pgq.sqltriga() as trigger function - L - use pgq.logutriga() as trigger function - B - BEFORE - A - AFTER - S - SKIP Queue event fields: - ev_type - I/U/D - ev_data - partial SQL statement - ev_extra1 - table name - ev_extra2 - optional urlencoded backup === londiste add sequence ; === Add the specified sequence to Londiste replication. === londiste remove table [,tbl]; === Remove the specified table(s) from the Londiste replication. === londiste remove sequence [,seq]; === Remove the specified sequence(s) from the Londiste replication. 
=== londiste tables; === List registered tables and informations about them: - table_name - fully-qualified table name - local - does events needs to be applied to local table - merge_state - show phase of initial copy - custom_snapshot - remote snapshot of COPY transaction - table_attrs - urlencoded dict of table attributes - dropped_ddl - partition combining: temp place to put DDL - copy_role - partition combining: how to handle copy - copy_pos - position in parallel copy working order ==== copy_role = lead ==== On copy start, drop indexes and store in dropped_ddl. On copy finish change state to catching-up, then wait until copy_role turns to NULL. Catching-up: if dropped_ddl is not NULL, restore them. ==== copy_role = wait-copy ==== On copy start, wait until role changes (to wait-replay). ==== copy_role = wait-replay ==== On copy finish, tag as 'catching-up'. Wait until copy_role is NULL, then proceed. === londiste seqs; === List registered sequences on this node and their last value. === londiste missing; === On Master, list tables not registered on set. On Slave, list tables on set but not registered locally. == OTHER COMMANDS == === exit; === Quit program. === ^D === Quit program. === ^C === Clear current buffer. == EXIT STATUS == 0:: Successful program execution. == ENVIRONMENT == PostgreSQL environment variables can be used. == NOT IMPLEMENTED COMMANDS == TODO : is it up-to-date ? 
- create node location [on ]; - alter node provider ; - alter node takeover with all; - alter node rename ; - alter node [location=] - drop node [on ]; - takeover ; - show cascade; - show_queue_stats ; - status skytools-3.2.6/.gitmodules0000644000000000000000000000011312426435645012462 0ustar [submodule "lib"] path = lib url = git://github.com/markokr/libusual.git skytools-3.2.6/config.mak.in0000644000000000000000000000155012426435645012657 0ustar PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PACKAGE_STRING = @PACKAGE_STRING@ SUFFIX = @SUFFIX@ prefix = @prefix@ datarootdir = @datarootdir@ exec_prefix = @exec_prefix@ datadir = @datadir@ docdir = @docdir@$(SUFFIX) mandir = @mandir@ bindir = @bindir@ PYTHON = @PYTHON@ PG_CONFIG = @PG_CONFIG@ # additional CPPFLAGS to pgxs modules PG_CPPFLAGS = $(filter -DHAVE%, @DEFS@) DESTDIR = ASCIIDOC = @ASCIIDOC@ XMLTO = @XMLTO@ SED = @SED@ GREP = @GREP@ EGREP = @EGREP@ MKDIR_P = @MKDIR_P@ LN_S = @LN_S@ CC = @CC@ CPPFLAGS = @CPPFLAGS@ CFLAGS = @CFLAGS@ @WFLAGS@ LDFLAGS = @LDFLAGS@ LIBS = @LIBS@ SHELL = @SHELL@ INSTALL = @INSTALL@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_DATA = @INSTALL_DATA@ BININSTALL = $(INSTALL_SCRIPT) SKYLOG = @SKYLOG@ SK3_SUBDIR = @SK3_SUBDIR@ skytools-3.2.6/INSTALL0000644000000000000000000000600612426435645011345 0ustar = SkyTools - tools for PostgreSQL = == Building == Skytools modules use some other code to run, so you need to install the following dependencies on the system where you want to build and run skytools based applications. === Build dependencies === postgresql libpq-dev postgresql-server-dev python python-dev When building code from GIT: git autoconf automake asciidoc xmlto libtool === Runtime dependencies === python psycopg2 rsync == Building from source tarball == $ ./configure --prefix=... 
$ make $ make install == Building from GIT == Fetch git tree: $ git clone git://github.com/markokr/skytools.git Fetch libusual submodule: $ git submodule init $ git submodule update Generate ./configure script: $ ./autogen.sh Now build as usual: $ ./configure --prefix=... $ make == Building a debian package == The main Makefile provides a target for building a +.deb+ package suitable for installation. The following additional packages are needed to build the debian package: devscripts autotools-dev python-all-dev python-support xmlto asciidoc libevent-dev libpq-dev postgresql-server-dev-all Then build: $ make deb == Optional arguments to ./configure == The following configure options are available: --prefix:: Path prefix where to install skytools files (default: /usr/local) --with-python:: Name or full path of the Python executable to use. (default: python) --with-pgconfig:: Name or full path to pg_config (default: pg_config) --with-asciidoc:: If asciidoc should be used to re-generate manpages. Required when building from GIT as the generated pages are not in repo. == Building without Makefile == It is possible to build and install Python part of Skytools only. This is useful when no required build / devel tools (dependencies) are available (typically on Windows platform). Commands to run vary across platforms but usually go like this: $ [sudo] python setup_pkgloader.py install $ [sudo] python setup_skytools.py install [--sk3-subdir] == Documentation == Skytools documentation consists of text files, some of them in the asciidoc format, from which you can produce man and html outputs: $ cd doc $ make man $ make html You need asciidoc and xmlto packages for this to work. == Installation == === Skytools === You have to install skytools on the machines where you want to run it, obviously. The installation of the skytools provides PgQ, Londiste and walmgr. 
=== Londiste === In the londiste case, as the replication process will run on only one machine (either provider, subscriber or a tier box), things are not that simple anymore. But simple nonetheless: install PgQ on the machine where you want to run it, provider being the straightforward choice. Then install londiste only on the box from where you want to launch the replication process, a good default being the subscriber box this time. You can then install londiste on provider from the subscriber box with the +londiste.py+ command line tool. == Usage == Please refer to the documentation. skytools-3.2.6/README0000644000000000000000000000746512426435645011206 0ustar = SkyTools - tools for PostgreSQL = This is a package of tools in use in Skype for replication and failover. It also includes a generic queuing mechanism called PgQ and a utility library for Python scripts, as well as a script for setting up and managing WAL based standby servers. == Overview == It contains the following modules: === PgQ === PgQ is a queuing system written in PL/pgSQL, Python and C code. It is based on snapshot-based event handling ideas from Slony-I, and is written for general usage. PgQ provides an efficient, transactional, queueing system with multi-node support (including work sharing and splitting, failover and switchover, for queues and for consumers). Rules: - There can be several queues in a database. - There can be several producers than can insert into any queue. - There can be several consumers on one queue. - There can be several subconsumers on a consumer. PgQ is split into 3 layers: Producers, Ticker and Consumers. *Producers* and *Consumers* respectively push and read events into a queue. Producers just need to call PostgreSQL stored procedures (like a trigger on a table or a PostgreSQL call from the application). 
Consumers are frequently written in Python (the preferred language as it has a powerful Skytools Framework), but are not limited to Python; any language able to run PostgreSQL stored procedures can be used. *Ticker* is a daemon which splits the queues into batchs of events and handle the maintenance of the system. The Ticker is provided with Skytools. Documentation: - PgQ ticker daemon (pgqd) usage: link:doc/pgqd.html[] - PgQ admin tool (qadm) usage: link:doc/qadmin.html[] - PgQ SQL API overview: link:doc/pgq-sql.html[] - PgQ SQL reference: link:pgq/[] === Londiste === Replication tool written in Python, using PgQ as event transport. Features: - Tables can be added one-by-one into set. - Initial COPY for one table does not block event replay for other tables. - Can compare tables on both sides. Documentation: - Londiste script usage: doc/londiste3.txt (also available as `man 1 londiste`) - Londiste HOWTOs: doc/howto/ === walmgr === This script will setup WAL archiving, does the initial backup, and runtime WAL archive and restore. It can also be used for up-to-last-second partial file copying, so that less than the whole file is lost in case of loss of the master database server. == Source tree contents == doc/:: Documentation in AsciiDoc format. Source for both html and man pages. python/:: Python modules and primary executables - walmgr, londiste, qadmin, pgqadm. python/pgq/:: Python framework for PgQ. python/londiste/:: Londiste replication. python/skytools/:: Low-level utilities for writing database scripts in Python. sql/:: Database modules. sql/pgq/:: Table definitions and functions for PgQ queueing. sql/pgq_node/:: Framework for cascaded consuming. sql/pgq_coop/:: Functions for cooperative consuming. sql/londiste/:: Table definitions and functions for Londiste replication. sql/ticker/:: PgQ ticker written in C. scripts/:: Python scripts with lesser priority. lib/:: libusual C libary, for pgqd. debian/:: Debian packaging. 
This is for creating private packages, official Debian packages use their own packaging code. misc/:: Random scripts used for building. == Upgrade from 2.1 == Assuming PgQ + Londiste setup. This will upgrade PgQ to 3.0 and install Londiste 3 in parallel with Londiste 2. 1. Install Postgres modules. They are backwards compatible with 2.1. 2. Stop `pgqadm.py ticker` processes. 3. Apply pgq.upgrade_2.1_to_3.0.sql 3. Apply pgq.upgrade.sql 4. Apply pgq_node.sql 5. Apply londiste.sql - this will throw error on CREATE SCHEMA, but should otherwise apply fine. 6. Start pgqd. The files mentioned above are installed under $PREFIX/share/skytools3/ directory. skytools-3.2.6/debian/0000755000000000000000000000000012426435645011534 5ustar skytools-3.2.6/debian/skytools3.prerm0000644000000000000000000000027612426435645014562 0ustar #!/bin/sh -e case $1 in configure) for f in londiste scriptmgr queue_mover queue_splitter; do update-alternatives --remove $f /usr/bin/${f}3 || exit 1 ; done;; esac #DEBHELPER# skytools-3.2.6/debian/control0000644000000000000000000000512112426435645013136 0ustar Source: skytools3 Section: database Priority: extra Maintainer: Dimitri Fontaine Build-Depends: debhelper (>= 7.0.50~), autotools-dev, autoconf (>= 2.65), automake, libtool, libevent-dev, python-all-dev, python-support, python-psycopg2, xmlto, asciidoc, libpq-dev, postgresql-server-dev-all, postgresql-server-dev-8.4 | postgresql-server-dev-9.0 | postgresql-server-dev-9.1 | postgresql-server-dev-9.2 | postgresql-server-dev-9.3 | postgresql-server-dev-9.4 Standards-Version: 3.9.1 Homepage: http://wiki.postgresql.org/wiki/Skytools Vcs-Git: http://github.com/markokr/skytools.git Vcs-Browser: https://github.com/markokr/skytools Package: skytools3 Architecture: any Depends: ${misc:Depends}, ${python:Depends}, python-pgq3, adduser Description: Skype tools for PostgreSQL replication, londiste and PGQ This is a package of tools in use in Skype for replication and failover. 
It contains the Londiste and PGQ services and scripts to run. Package: python-pgq3 Architecture: any Section: python Depends: ${misc:Depends}, ${python:Depends}, python-skytools3, python-psycopg2 Description: python framework for Skype tools for PostgreSQL replication This is a package of tools in use in Skype for replication and failover. It includes a generic queuing mechanism PgQ and utility library for Python scripts. Package: python-skytools3 Architecture: any Section: python Depends: ${shlibs:Depends}, ${misc:Depends}, ${python:Depends} Suggests: python-psycopg2 Description: python framework for Skype tools for PostgreSQL replication This is a package of tools in use in Skype for replication and failover. It contains the Skytools' python framework for scripting against a PostgreSQL database, which is used by londiste, walmgr and PGQ. Package: skytools3-walmgr Architecture: any Depends: ${misc:Depends}, ${python:Depends}, python-skytools3, postgresql-8.4 | postgresql-9.0 | postgresql-9.1 | postgresql-9.2 | postgresql-9.3 | postgresql-9.4 Enhances: postgresql-8.4, postgresql-9.0, postgresql-9.1 Description: Skype tools for PostgreSQL replication and failover This is a package of tools in use in Skype for replication and failover. It's the WAL Shipping part of it (binary cluster-wide replication). Package: skytools3-ticker Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends} Description: Skype tools for PostgreSQL replication, londiste and PGQ This is the PGQ ticker used by Skype replication, londiste. It consists of PL/pgsql, and C code in database, with Python framework on top of it. It is based on snapshot based event handling ideas from Slony-I, written for general usage. 
skytools-3.2.6/debian/skytools3-walmgr.docs0000644000000000000000000000023512426435645015647 0ustar debian/tmp/usr/share/doc/skytools3/walmgr3.html debian/tmp/usr/share/doc/skytools3/conf/wal-master.ini debian/tmp/usr/share/doc/skytools3/conf/wal-slave.ini skytools-3.2.6/debian/skytools3-ticker.dirs0000644000000000000000000000001012426435645015637 0ustar usr/bin skytools-3.2.6/debian/source/0000755000000000000000000000000012426435645013034 5ustar skytools-3.2.6/debian/source/format0000644000000000000000000000001512426435645014243 0ustar 3.0 (native) skytools-3.2.6/debian/python-skytools3.install0000644000000000000000000000013712426435645016416 0ustar usr/lib/python*/site-packages/pkgloader.py usr/lib/python*/site-packages/skytools-3.0/skytools skytools-3.2.6/debian/compat0000644000000000000000000000000212426435645012732 0ustar 7 skytools-3.2.6/debian/postgresql-9.2-pgq3.dirs0000644000000000000000000000003512426435645015776 0ustar usr/share/doc/postgresql-9.2 skytools-3.2.6/debian/postgresql-9.0-pgq3.install0000644000000000000000000000016712426435645016507 0ustar usr/lib/postgresql/9.0/lib/pgq_triggers.so usr/lib/postgresql/9.0/lib/pgq_lowlevel.so usr/share/postgresql/9.0/contrib skytools-3.2.6/debian/skytools3-ticker.install0000644000000000000000000000002512426435645016352 0ustar usr/bin/pgqd usr/bin skytools-3.2.6/debian/postgresql-8.3-pgq3.docs0000644000000000000000000000005612426435645015770 0ustar sql/pgq/README.pgq sql/pgq_ext/README.pgq_ext skytools-3.2.6/debian/README.source0000644000000000000000000000071112426435645013712 0ustar skytools-3.0 for Debian ----------------------- This package is maintained in git and uses a submodule. 
To get a fresh checkout and build the packages, follow those steps: ## fetch git tree, from dimitri who maintains the debian package ## real upstream is at git://github.com/markokr/skytools-dev.git $ git clone http://github.com/dimitri/skytools.git ## fetch libusual submodule $ git submodule update --init ## now build $ debuild ... skytools-3.2.6/debian/postgresql-9.4-pgq3.install0000644000000000000000000000023212426435645016504 0ustar usr/lib/postgresql/9.4/lib/pgq_triggers.so usr/lib/postgresql/9.4/lib/pgq_lowlevel.so usr/share/postgresql/9.4/contrib usr/share/postgresql/9.4/extension skytools-3.2.6/debian/skytools3-ticker.manpages0000644000000000000000000000004512426435645016501 0ustar debian/tmp/usr/share/man/man1/pgqd.1 skytools-3.2.6/debian/postgresql-9.4-pgq3.docs0000644000000000000000000000005612426435645015772 0ustar sql/pgq/README.pgq sql/pgq_ext/README.pgq_ext skytools-3.2.6/debian/postgresql-9.0-pgq3.docs0000644000000000000000000000005612426435645015766 0ustar sql/pgq/README.pgq sql/pgq_ext/README.pgq_ext skytools-3.2.6/debian/postgresql-9.1-pgq3.dirs0000644000000000000000000000003512426435645015775 0ustar usr/share/doc/postgresql-9.1 skytools-3.2.6/debian/skytools3.init.d0000644000000000000000000001067112426435645014622 0ustar #!/bin/sh ### BEGIN INIT INFO # Provides: skytools3 # Required-Start: $network $local_fs $remote_fs # Required-Stop: $network $remote_fs # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: Skype tools for PostgreSQL replication, londiste and PGQ ### END INIT INFO # Author: Dimitri Fontaine # PATH should only include /usr/* if it runs after the mountnfs.sh script PATH=/sbin:/usr/sbin:/bin:/usr/bin DESC=skytools3 # Introduce a short description here NAME=skytools3 # Introduce the short server's name here USER=skytools DAEMON=/usr/bin/scriptmgr3 # Introduce the server's location here DAEMON_ARGS="/etc/skytools.ini" # Arguments to run the daemon with PIDFILE=/var/run/skytools/$NAME.pid SCRIPTNAME=/etc/init.d/$NAME # 
Exit if the package is not installed [ -x $DAEMON ] || exit 0 # Read configuration variable file if it is present [ -r /etc/default/$NAME ] && . /etc/default/$NAME # Load the VERBOSE setting and other rcS variables . /lib/init/vars.sh # Define LSB log_* functions. # Depend on lsb-base (>= 3.0-6) to ensure that this file is present. . /lib/lsb/init-functions # care for transient data directories (think tmpfs) install -o skytools -g skytools -d /var/log/skytools /var/run/skytools # # Function that starts the daemon/service # do_start() { # Return # 0 if daemon has been started # 1 if daemon was already running # 2 if daemon could not be started start-stop-daemon --start --quiet --chuid $USER \ --pidfile $PIDFILE --exec $DAEMON --test > /dev/null \ || return 1 start-stop-daemon --start --quiet --chuid $USER \ --pidfile $PIDFILE --exec $DAEMON -- $DAEMON_ARGS start -a \ || return 2 # Add code here, if necessary, that waits for the process to be ready # to handle requests from services started subsequently which depend # on this one. As a last resort, sleep for some time. } # # Function that stops the daemon/service # do_stop() { # Return # 0 if daemon has been stopped # 1 if daemon was already stopped # 2 if daemon could not be stopped # other if a failure occurred start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME RETVAL="$?" [ "$RETVAL" = 2 ] && return 2 # Wait for children to finish too if this is a daemon that forks # and if the daemon is only ever run from this initscript. # If the above conditions are not satisfied then add some other code # that waits for the process to drop all resources that could be # needed by services started subsequently. A last resort is to # sleep for some time. /usr/bin/scriptmgr $DAEMON_ARGS stop -a start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 \ --chuid $USER --exec $DAEMON [ "$?" = 2 ] && return 2 # Many daemons don't delete their pidfiles when they exit. 
rm -f $PIDFILE return "$RETVAL" } # # Function that sends a SIGHUP to the daemon/service # do_reload() { # # If the daemon can reload its configuration without # restarting (for example, when it is sent a SIGHUP), # then implement that here. # start-stop-daemon --stop --signal 1 --quiet --pidfile $PIDFILE --name $NAME return 0 } case "$1" in start) [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC " "$NAME" do_start case "$?" in 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;; 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;; esac ;; stop) [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME" do_stop case "$?" in 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;; 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;; esac ;; status) status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $? ;; #reload|force-reload) # # If do_reload() is not implemented then leave this commented out # and leave 'force-reload' as an alias for 'restart'. # #log_daemon_msg "Reloading $DESC" "$NAME" #do_reload #log_end_msg $? #;; restart|force-reload) # # If the "reload" option is implemented then remove the # 'force-reload' alias # log_daemon_msg "Restarting $DESC" "$NAME" do_stop case "$?" in 0|1) do_start case "$?" 
in 0) log_end_msg 0 ;; 1) log_end_msg 1 ;; # Old process is still running *) log_end_msg 1 ;; # Failed to start esac ;; *) # Failed to stop log_end_msg 1 ;; esac ;; *) #echo "Usage: $SCRIPTNAME {start|stop|restart|reload|force-reload}" >&2 echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2 exit 3 ;; esac : skytools-3.2.6/debian/postgresql-9.2-pgq3.docs0000644000000000000000000000005612426435645015770 0ustar sql/pgq/README.pgq sql/pgq_ext/README.pgq_ext skytools-3.2.6/debian/skytools3-ticker.docs0000644000000000000000000000006712426435645015642 0ustar debian/tmp/usr/share/doc/skytools3/conf/pgqd.ini.templ skytools-3.2.6/debian/postgresql-9.2-pgq3.install0000644000000000000000000000023212426435645016502 0ustar usr/lib/postgresql/9.2/lib/pgq_triggers.so usr/lib/postgresql/9.2/lib/pgq_lowlevel.so usr/share/postgresql/9.2/contrib usr/share/postgresql/9.2/extension skytools-3.2.6/debian/python-pgq3.install0000644000000000000000000000005712426435645015317 0ustar usr/lib/python*/site-packages/skytools-3.0/pgq skytools-3.2.6/debian/postgresql-9.3-pgq3.install0000644000000000000000000000023212426435645016503 0ustar usr/lib/postgresql/9.3/lib/pgq_triggers.so usr/lib/postgresql/9.3/lib/pgq_lowlevel.so usr/share/postgresql/9.3/contrib usr/share/postgresql/9.3/extension skytools-3.2.6/debian/README.Debian0000644000000000000000000000155112426435645013577 0ustar skytools-3.0 for Debian ----------------------- The skytools package for 3.0 has been reworked and split into a number of packages: skytools3 Skytool's replication and queuing python-pgq3 Skytool's PGQ python library python-skytools3 python scripts framework for skytools skytools-ticker3 PGQ ticker daemon service skytools-walmgr3 high-availability archive and restore commands postgresql-8.4-pgq3 PGQ server-side code (C module for PostgreSQL) postgresql-9.0-pgq3 PGQ server-side code (C module for PostgreSQL) You can install your script in /etc/skytools/*.ini and the skytools package will try to start 
them automatically, using scriptmgr. Of course you still need to install pgq for ticker services and londiste for replication. -- Dimitri Fontaine , Wed, 6 Apr 2011 17:07:35 +0200 skytools-3.2.6/debian/skytools3.install0000644000000000000000000000042312426435645015075 0ustar usr/bin/qadmin usr/bin/londiste3 usr/bin/scriptmgr3 usr/bin/queue_mover3 usr/bin/queue_splitter3 usr/bin/simple_consumer3 usr/bin/simple_local_consumer3 usr/bin/data_maintainer3 debian/skytools.ini /etc usr/lib/python*/site-packages/skytools-3.0/londiste usr/share/skytools3 skytools-3.2.6/debian/pgversions0000644000000000000000000000003412426435645013653 0ustar 8.3 8.4 9.0 9.1 9.2 9.3 9.4 skytools-3.2.6/debian/skytools3.dirs0000644000000000000000000000005112426435645014365 0ustar usr/bin etc/skytools usr/share/skytools3 skytools-3.2.6/debian/postgresql-8.4-pgq3.dirs0000644000000000000000000000003512426435645015777 0ustar usr/share/doc/postgresql-8.4 skytools-3.2.6/debian/copyright0000644000000000000000000001032012426435645013463 0ustar This work was packaged for Debian by: Dimitri Fontaine on Mon, 28 Mar 2011 14:28:13 +0200 It was downloaded from: https://github.com/markokr/skytools Copyright: Copyright (C) 2007-2011 Marko Kreen, Skype Technologies OÜ License: Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
The Debian packaging is: Copyright (C) 2011 Dimitri Fontaine The file lib/usual/lookup3.c is public domain The file lib/test/attregex/testregex.c has its own copyright, reproduced here: /* * regex(3) test harness * * build: cc -o testregex testregex.c * help: testregex --man * note: REG_* features are detected by #ifdef; if REG_* are enums * then supply #define REG_foo REG_foo for each enum REG_foo * * Glenn Fowler * AT&T Research * * PLEASE: publish your tests so everyone can benefit * * The following license covers testregex.c and all associated test data. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of THIS SOFTWARE FILE (the "Software"), to deal in the Software * without restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, and/or sell copies of the * Software, and to permit persons to whom the Software is furnished to do * so, subject to the following disclaimer: * * THIS SOFTWARE IS PROVIDED BY AT&T ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL AT&T BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ The files lib/test/tinytest.[ch] has their own copyright, reproduced here: /* tinytest.c -- Copyright 2009 Nick Mathewson * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ skytools-3.2.6/debian/postgresql-8.3-pgq3.dirs0000644000000000000000000000003512426435645015776 0ustar usr/share/doc/postgresql-8.3 skytools-3.2.6/debian/skytools3-walmgr.prerm0000644000000000000000000000022712426435645016045 0ustar #!/bin/sh -e case $1 in configure) for f in walmgr; do update-alternatives --remove $f /usr/bin/${f}3 || exit 1 ; done;; esac #DEBHELPER# skytools-3.2.6/debian/postgresql-9.3-pgq3.dirs0000644000000000000000000000003512426435645015777 0ustar usr/share/doc/postgresql-9.3 skytools-3.2.6/debian/skytools3.docs0000644000000000000000000000074312426435645014364 0ustar debian/tmp/usr/share/doc/skytools3/scriptmgr.html debian/tmp/usr/share/doc/skytools3/skytools_upgrade.html debian/tmp/usr/share/doc/skytools3/qadmin.html debian/tmp/usr/share/doc/skytools3/skytools3.html debian/tmp/usr/share/doc/skytools3/queue_splitter3.html debian/tmp/usr/share/doc/skytools3/queue_mover3.html debian/tmp/usr/share/doc/skytools3/londiste3.html debian/tmp/usr/share/doc/skytools3/simple_consumer3.html debian/tmp/usr/share/doc/skytools3/simple_local_consumer3.html skytools-3.2.6/debian/postgresql-9.3-pgq3.docs0000644000000000000000000000005612426435645015771 0ustar sql/pgq/README.pgq sql/pgq_ext/README.pgq_ext skytools-3.2.6/debian/rules0000755000000000000000000000243012426435645012613 0ustar #!/usr/bin/make -f # -*- makefile -*- # Uncomment this to turn on verbose mode. 
#export DH_VERBOSE=1 SRCDIR = $(CURDIR) PKGVERS = $(shell dpkg-parsechangelog | awk -F '[:-]' '/^Version:/ { print substr($$2, 2) }') ORIG_EXCLUDE=--exclude-vcs --exclude=debian PG_BUILDEXT = pg_buildext include /usr/share/postgresql-common/pgxs_debian_control.mk config.mak: #./autogen.sh ./configure --prefix=/usr --with-asciidoc --with-sk3-subdir override_dh_auto_configure: config.mak override_dh_auto_clean: config.mak $(MAKE) -C doc realclean dh_auto_clean -- distclean for version in `cat $(CURDIR)/debian/pgversions`; do \ rm -rf "debian/postgresql-$${version}-pgq3"; \ done # build sql modules for several postgres versions override_dh_auto_install: config.mak mkdir -p $(CURDIR)/debian/tmp dh_auto_install $(MAKE) -C doc htmlinstall DESTDIR=$(CURDIR)/debian/tmp # now care about any previous supported versions for version in $$($(PG_BUILDEXT) supported-versions $(CURDIR)); do \ echo "### Building for PostgreSQL $$version" && \ make -C sql clean install \ PG_CONFIG=/usr/lib/postgresql/$$version/bin/pg_config \ DESTDIR=$(CURDIR)/debian/tmp \ || exit 1 ; \ done orig: config.mak rm -rf dist make tgz mv dist/*.tar.gz ../skytools3_$(PKGVERS).orig.tar.gz %: dh $@ skytools-3.2.6/debian/skytools3-walmgr.install0000644000000000000000000000002012426435645016355 0ustar usr/bin/walmgr3 skytools-3.2.6/debian/postgresql-8.3-pgq3.install0000644000000000000000000000016712426435645016511 0ustar usr/lib/postgresql/8.3/lib/pgq_triggers.so usr/lib/postgresql/8.3/lib/pgq_lowlevel.so usr/share/postgresql/8.3/contrib skytools-3.2.6/debian/postgresql-8.4-pgq3.install0000644000000000000000000000016712426435645016512 0ustar usr/lib/postgresql/8.4/lib/pgq_triggers.so usr/lib/postgresql/8.4/lib/pgq_lowlevel.so usr/share/postgresql/8.4/contrib skytools-3.2.6/debian/python-pgq3.docs0000644000000000000000000000022612426435645014577 0ustar debian/tmp/usr/share/doc/skytools3/pgq-sql.html debian/tmp/usr/share/doc/skytools3/pgq-nodupes.html debian/tmp/usr/share/doc/skytools3/set.notes.html 
skytools-3.2.6/debian/postgresql-9.1-pgq3.install0000644000000000000000000000023212426435645016501 0ustar usr/lib/postgresql/9.1/lib/pgq_triggers.so usr/lib/postgresql/9.1/lib/pgq_lowlevel.so usr/share/postgresql/9.1/contrib usr/share/postgresql/9.1/extension skytools-3.2.6/debian/skytools3-walmgr.manpages0000644000000000000000000000005012426435645016505 0ustar debian/tmp/usr/share/man/man1/walmgr3.1 skytools-3.2.6/debian/postgresql-9.1-pgq3.docs0000644000000000000000000000005612426435645015767 0ustar sql/pgq/README.pgq sql/pgq_ext/README.pgq_ext skytools-3.2.6/debian/python-skytools3.docs0000644000000000000000000000027112426435645015677 0ustar debian/tmp/usr/share/doc/skytools3/README.html debian/tmp/usr/share/doc/skytools3/TODO.html debian/tmp/usr/share/doc/skytools3/devnotes.html debian/tmp/usr/share/doc/skytools3/faq.html skytools-3.2.6/debian/postgresql-9.4-pgq3.dirs0000644000000000000000000000003512426435645016000 0ustar usr/share/doc/postgresql-9.4 skytools-3.2.6/debian/docs0000644000000000000000000000001412426435645012402 0ustar NEWS README skytools-3.2.6/debian/skytools.ini0000644000000000000000000000053112426435645014123 0ustar # configure your skytools services here # man scriptmgr for details. 
[scriptmgr] job_name = skytools3 logfile = /var/log/skytools/%(job_name)s.log pidfile = /var/run/skytools/%(job_name)s.pid config_list = /etc/skytools/*.ini [DEFAULT] cwd = / [londiste3] script = /usr/bin/londiste3 args = worker [pgqd] script = /usr/bin/pgqd skytools-3.2.6/debian/postgresql-9.0-pgq3.dirs0000644000000000000000000000003512426435645015774 0ustar usr/share/doc/postgresql-9.0 skytools-3.2.6/debian/skytools3-walmgr.postinst0000644000000000000000000000177112426435645016610 0ustar #!/bin/sh # postinst script for #PACKAGE# # # see: dh_installdeb(1) set -e # summary of how this script can be called: # * `configure' # * `abort-upgrade' # * `abort-remove' `in-favour' # # * `abort-remove' # * `abort-deconfigure' `in-favour' # `removing' # # for details, see http://www.debian.org/doc/debian-policy/ or # the debian-policy package case "$1" in configure) # scripts alternatives bin=/usr/bin man=/usr/share/man/man1 for f in walmgr do update-alternatives --install $bin/${f} $f $bin/${f}3 3 \ --slave $man/${f}.1.gz $f.1 $man/${f}3.1.gz || exit 1 done ;; esac # dh_installdeb will replace this with shell code automatically # generated by other debhelper scripts. 
#DEBHELPER# exit 0 skytools-3.2.6/debian/skytools3.postinst0000644000000000000000000000252112426435645015313 0ustar #!/bin/sh # postinst script for #PACKAGE# # # see: dh_installdeb(1) set -e # summary of how this script can be called: # * `configure' # * `abort-upgrade' # * `abort-remove' `in-favour' # # * `abort-remove' # * `abort-deconfigure' `in-favour' # `removing' # # for details, see http://www.debian.org/doc/debian-policy/ or # the debian-policy package case "$1" in configure) if getent passwd skytools > /dev/null; then echo user skytools already exists else adduser --system --no-create-home --home /var/lib/skytools --group --disabled-login skytools fi # care for transient data directories install -o skytools -g skytools -d /var/log/skytools /var/run/skytools # scripts alternatives bin=/usr/bin man=/usr/share/man/man1 for f in londiste scriptmgr queue_mover queue_splitter do update-alternatives --install $bin/${f} $f $bin/${f}3 3 \ --slave $man/${f}.1.gz $f.1 $man/${f}3.1.gz || exit 1 done ;; esac # dh_installdeb will replace this with shell code automatically # generated by other debhelper scripts. 
#DEBHELPER# exit 0 skytools-3.2.6/debian/control.in0000644000000000000000000000574612426435645013560 0ustar Source: skytools3 Section: database Priority: extra Maintainer: Dimitri Fontaine Build-Depends: debhelper (>= 7.0.50~), autotools-dev, autoconf (>= 2.65), automake, libtool, libevent-dev, python-all-dev, python-support, python-psycopg2, xmlto, asciidoc, libpq-dev, postgresql-server-dev-all, postgresql-server-dev-8.4 | postgresql-server-dev-9.0 | postgresql-server-dev-9.1 | postgresql-server-dev-9.2 | postgresql-server-dev-9.3 | postgresql-server-dev-9.4 Standards-Version: 3.9.1 Homepage: http://wiki.postgresql.org/wiki/Skytools Vcs-Git: http://github.com/markokr/skytools.git Vcs-Browser: https://github.com/markokr/skytools Package: skytools3 Architecture: any Depends: ${misc:Depends}, ${python:Depends}, python-pgq3, adduser Description: Skype tools for PostgreSQL replication, londiste and PGQ This is a package of tools in use in Skype for replication and failover. It contains the Londiste and PGQ services and scripts to run. Package: python-pgq3 Architecture: any Section: python Depends: ${misc:Depends}, ${python:Depends}, python-skytools3, python-psycopg2 Description: python framework for Skype tools for PostgreSQL replication This is a package of tools in use in Skype for replication and failover. It includes a generic queuing mechanism PgQ and utility library for Python scripts. Package: python-skytools3 Architecture: any Section: python Depends: ${shlibs:Depends}, ${misc:Depends}, ${python:Depends} Suggests: python-psycopg2 Description: python framework for Skype tools for PostgreSQL replication This is a package of tools in use in Skype for replication and failover. It contains the Skytools' python framework for scripting against a PostgreSQL database, which is used by londiste, walmgr and PGQ. 
Package: skytools3-walmgr Architecture: any Depends: ${misc:Depends}, ${python:Depends}, python-skytools3, postgresql-8.4 | postgresql-9.0 | postgresql-9.1 | postgresql-9.2 | postgresql-9.3 | postgresql-9.4 Enhances: postgresql-8.4, postgresql-9.0, postgresql-9.1 Description: Skype tools for PostgreSQL replication and failover This is a package of tools in use in Skype for replication and failover. It's the WAL Shipping part of it (binary cluster-wide replication). Package: skytools3-ticker Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends} Description: Skype tools for PostgreSQL replication, londiste and PGQ This is the PGQ ticker used by Skype replication, londiste. It consists of PL/pgsql, and C code in database, with Python framework on top of it. It is based on snapshot based event handling ideas from Slony-I, written for general usage. Package: postgresql-PGVERSION-pgq3 Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends}, postgresql-PGVERSION Provides: skytools-modules Conflicts: skytools-modules-PGVERSION Replaces: skytools-modules-PGVERSION Description: Skype tools for PostgreSQL replication, londiste and PGQ This is the PGQ extension used by Skype replication, londiste. It consists of PL/pgsql, and C code in database. 
skytools-3.2.6/debian/postgresql-8.4-pgq3.docs0000644000000000000000000000005612426435645015771 0ustar sql/pgq/README.pgq sql/pgq_ext/README.pgq_ext skytools-3.2.6/debian/skytools3.manpages0000644000000000000000000000050112426435645015217 0ustar debian/tmp/usr/share/man/man1/scriptmgr3.1 debian/tmp/usr/share/man/man1/qadmin.1 debian/tmp/usr/share/man/man1/londiste3.1 debian/tmp/usr/share/man/man1/queue_mover3.1 debian/tmp/usr/share/man/man1/queue_splitter3.1 debian/tmp/usr/share/man/man1/simple_consumer3.1 debian/tmp/usr/share/man/man1/simple_local_consumer3.1 skytools-3.2.6/debian/changelog0000644000000000000000000001471312426435645013414 0ustar skytools3 (3.2.6) experimental; urgency=low * v3.2.6 -- martinko Wed, 05 Nov 2014 16:00:00 +0100 skytools3 (3.2.5) experimental; urgency=low * v3.2.5 -- martinko Wed, 29 Oct 2014 09:40:00 +0100 skytools3 (3.2.4) experimental; urgency=low * v3.2.4 -- martinko Mon, 27 Oct 2014 10:50:00 +0100 skytools3 (3.2.3) experimental; urgency=low * v3.2.3 -- martinko Wed, 08 Oct 2014 10:40:00 +0200 skytools3 (3.2.2) experimental; urgency=low * v3.2.2 -- martinko Thu, 10 Apr 2014 14:00:00 +0200 skytools3 (3.2.1) experimental; urgency=low * v3.2.1 -- martinko Wed, 09 Apr 2014 17:34:51 +0200 skytools3 (3.2) experimental; urgency=low * v3.2 -- martinko Mon, 31 Mar 2014 16:17:18 +0200 skytools3 (3.1.6rc2) experimental; urgency=low * v3.1.6rc2 -- martinko Mon, 17 Mar 2014 16:17:18 +0100 skytools3 (3.1.6rc1) experimental; urgency=low * v3.1.6rc1 -- martinko Thu, 20 Feb 2014 15:00:00 +0100 skytools3 (3.1.5.4) experimental; urgency=low * v3.1.5.4 -- martinko Tue, 03 Dec 2013 15:16:17 +0100 skytools3 (3.1.5.3) experimental; urgency=low * v3.1.5.3 -- martinko Fri, 20 Sep 2013 14:28:56 +0200 skytools3 (3.1.5.2) experimental; urgency=low * v3.1.5.2 -- Marko Kreen Wed, 18 Sep 2013 15:41:00 +0300 skytools3 (3.1.5) experimental; urgency=low * v3.1.5 -- martinko Wed, 31 Jul 2013 14:15:16 +0200 skytools3 (3.1.4) experimental; urgency=low * v3.1.4 -- 
Marko Kreen Wed, 17 Apr 2013 11:08:30 +0300 skytools3 (3.1.3) experimental; urgency=low * v3.1.3 -- Marko Kreen Fri, 21 Dec 2012 10:57:05 +0200 skytools3 (3.1.2) experimental; urgency=low * v3.1.2 -- Marko Kreen Fri, 02 Nov 2012 11:43:51 +0200 skytools3 (3.1.1) experimental; urgency=low * v3.1.1 -- Marko Kreen Tue, 09 Oct 2012 13:18:34 +0300 skytools3 (3.1) experimental; urgency=low * v3.1 -- Marko Kreen Thu, 19 Jul 2012 11:04:58 +0300 skytools3 (3.0.3) experimental; urgency=low * v3.0.3 -- Marko Kreen Wed, 30 May 2012 15:57:04 +0300 skytools3 (3.0.2) experimental; urgency=low * v3.0.2 -- Marko Kreen Thu, 10 May 2012 21:29:16 +0300 skytools3 (3.0.1) experimental; urgency=low * v3.0.1 -- Marko Kreen Mon, 09 Apr 2012 10:33:37 +0300 skytools3 (3.0) experimental; urgency=low * v3.0 -- Marko Kreen Sat, 17 Mar 2012 13:50:42 +0200 skytools3 (3.0b1) experimental; urgency=low * test beta 1 -- Marko Kreen Thu, 05 Jan 2012 21:01:23 +0200 skytools3 (3.0~rc2-1) experimental; urgency=low * Fix skytools3-walmgr dependencies (support 9.1) * Bug fixes upstream -- Dimitri Fontaine Tue, 11 Oct 2011 12:06:54 +0200 skytools3 (3.0~rc1-2) experimental; urgency=low * Add automake to the Build Depends (Closes: #632574) * Merge with upstream -- Dimitri Fontaine Wed, 06 Jul 2011 16:07:12 +0200 skytools3 (3.0~rc1-1) experimental; urgency=low * New upstream version of skytools (Closes: #621115) -- Dimitri Fontaine Mon, 28 Mar 2011 14:28:13 +0200 skytools3 (3.0~a1-1) unstable; urgency=low * 3.0a1 -- Marko Kreen Mon, 13 Apr 2009 17:16:02 +0300 skytools3 (3.0~a-0) unstable; urgency=low * dev tree for 3.0 -- Marko Kreen Fri, 13 Feb 2009 17:33:55 +0200 skytools (2.1.12) unstable; urgency=low * v2.1.12 -- Marko Kreen Wed, 10 Nov 2010 15:23:59 +0200 skytools (2.1.12rc2) unstable; urgency=low * v2.1.12rc2 -- Marko Kreen Tue, 05 Oct 2010 11:43:38 +0300 skytools (2.1.12rc1) unstable; urgency=low * v2.1.12rc1 -- Marko Kreen Tue, 21 Sep 2010 07:26:22 -0700 skytools (2.1.11) unstable; urgency=low * v2.1.11 
-- Marko Kreen Wed, 03 Feb 2010 18:28:57 +0200 skytools (2.1.11rc1) unstable; urgency=low * v2.1.11rc1 -- Marko Kreen Fri, 30 Oct 2009 18:05:58 +0200 skytools (2.1.10) unstable; urgency=low * v2.1.10 -- Marko Kreen Mon, 31 Aug 2009 16:44:53 +0300 skytools (2.1.10rc1) unstable; urgency=low * v2.1.10rc1 -- Marko Kreen Mon, 17 Aug 2009 15:52:16 +0300 skytools (2.1.9) unstable; urgency=low * v2.1.9 -- Marko Kreen Fri, 13 Mar 2009 15:39:18 +0200 skytools (2.1.9rc1) unstable; urgency=low * v2.1.9rc1 -- Marko Kreen Thu, 26 Feb 2009 14:50:49 +0200 skytools (2.1.8) unstable; urgency=low * v2.1.8 -- Marko Kreen Sun, 12 Oct 2008 13:29:09 +0300 skytools (2.1.8rc1) unstable; urgency=low * v2.1.8rc1 -- Marko Kreen Mon, 22 Sep 2008 16:31:27 +0300 skytools (2.1.7) unstable; urgency=low * v2.1.7 -- Marko Kreen Wed, 28 May 2008 17:04:32 +0300 skytools (2.1.6) unstable; urgency=low * Final release -- Marko Kreen Sat, 05 Apr 2008 16:45:11 +0300 skytools (2.1.6rc3) unstable; urgency=low * quoting/parsing fixes ? walmgr fix -- Marko Kreen Wed, 12 Mar 2008 15:43:39 +0200 skytools (2.1.6rc2) unstable; urgency=low * Bugfix release. -- Marko Kreen Fri, 07 Dec 2007 16:12:27 +0200 skytools (2.1.5) unstable; urgency=low * New public release. -- Marko Kreen Mon, 19 Nov 2007 15:32:41 +0200 skytools (2.1.4) unstable; urgency=low * Upgrade walmgr, some fixes. 
-- Marko Kreen Fri, 13 Apr 2007 11:08:41 +0300 skytools (2.1.3) unstable; urgency=low * brown paper bag -- Marko Kreen Tue, 10 Apr 2007 11:55:47 +0300 skytools (2.1.2) unstable; urgency=low * more bugfixes -- Marko Kreen Mon, 09 Apr 2007 17:56:35 +0300 skytools (2.1.1) unstable; urgency=low * bugfixes -- Marko Kreen Tue, 03 Apr 2007 15:03:28 +0300 skytools (2.1) unstable; urgency=low * cleanup -- Marko Kreen Fri, 02 Feb 2007 12:38:17 +0200 skytools-3.2.6/Makefile0000644000000000000000000000526312426435645011760 0ustar -include config.mak PYTHON ?= python pyver = $(shell $(PYTHON) -V 2>&1 | sed 's/^[^ ]* \([0-9]*\.[0-9]*\).*/\1/') SUBDIRS = sql doc # modules that use doctest for regtests DOCTESTMODS = skytools.quoting skytools.parsing skytools.timeutil \ skytools.sqltools skytools.querybuilder skytools.natsort \ skytools.utf8 skytools.sockutil skytools.fileutil \ londiste.exec_attrs all: python-all sub-all config.mak install: sub-install python-install distclean: sub-distclean sub-all sub-install sub-clean sub-distclean: for dir in $(SUBDIRS); do \ $(MAKE) -C $$dir $(subst sub-,,$@) DESTDIR=$(DESTDIR) || exit $?; \ done .PHONY: sub-all sub-clean sub-install sub-distclean python-all: config.mak $(PYTHON) setup_skytools.py build clean: sub-clean $(PYTHON) setup_skytools.py clean rm -rf build build.sk3 find python -name '*.py[oc]' -print | xargs rm -f rm -f python/skytools/installer_config.py source.list rm -rf tests/londiste/sys rm -rf tests/londiste/file_logs rm -rf tests/londiste/fix.* rm -rf tests/scripts/sys installcheck: $(MAKE) -C sql installcheck modules-install: config.mak $(MAKE) -C sql install DESTDIR=$(DESTDIR) test \! 
-d compat || $(MAKE) -C compat $@ DESTDIR=$(DESTDIR) SITEDIR = site-packages python-install: config.mak $(PYTHON) setup_pkgloader.py install --prefix=$(prefix) --root=$(DESTDIR)/ $(BROKEN_PYTHON) $(PYTHON) setup_skytools.py install --prefix=$(prefix) --root=$(DESTDIR)/ $(BROKEN_PYTHON) $(MAKE) -C doc DESTDIR=$(DESTDIR) install realclean: distclean $(MAKE) -C doc $@ $(MAKE) distclean distclean: sub-distclean rm -rf source.list dist skytools-* find python -name '*.pyc' | xargs rm -f rm -rf dist build rm -rf autom4te.cache config.log config.status config.mak deb: rm -f debian/control make -f debian/rules debian/control debuild -uc -us -b tgz: config.mak clean $(MAKE) -C doc man rm -f source.list $(PYTHON) setup_skytools.py sdist -t source.cfg -m source.list debclean: clean rm -rf debian/tmp-* debian/build* debian/packages-tmp* rm -f debian/files debian/sub* debian/packages grep-dctrl -vP PGVER debian/control.in > debian/control boot: configure configure: configure.ac lib/m4/usual.m4 ./autogen.sh tags: ctags `find python -name '*.py'` check: ./misc/docheck.sh # workaround for Debian's broken python debfix: @$(PYTHON) setup_skytools.py install --help | grep -q install-layout \ && echo BROKEN_PYTHON=--install-layout=deb || echo 'WORKING_PYTHON=found' .PHONY: all clean distclean install deb debclean tgz tags .PHONY: python-all python-clean python-install check test test: @cd python; for m in $(DOCTESTMODS); do \ printf "%-22s ... 
" $$m; \ $(PYTHON) -m $$m && echo "ok" || { echo "FAIL"; exit 1; }; \ done skytools-3.2.6/lib/0000755000000000000000000000000012474045613011053 5ustar skytools-3.2.6/lib/.gitignore0000644000000000000000000000033612166266754013056 0ustar configure usual/config.h* config.status config.log config.mak obj test/obj test/test_config.h test/regtest doc/html *.[oas] *.gc[odn][aov] ./install-sh config.guess config.sub ltmain.sh libtool build.mk .objs .libs .deps skytools-3.2.6/lib/build.mk.in0000644000000000000000000000012112166266754013113 0ustar include @abs_top_builddir@/config.mak include $(abs_top_srcdir)/mk/antimake.mk skytools-3.2.6/lib/usual/0000755000000000000000000000000012166266754012215 5ustar skytools-3.2.6/lib/usual/socket.h0000644000000000000000000000761012166266754013662 0ustar /* * Copyright (c) 2007-2009 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** @file * * Socket compat, few utils. 
* * Socket headers included: * - win32: * - win32: * - * - * - * - * - * - * - * - */ #ifndef _USUAL_SOCKET_H_ #define _USUAL_SOCKET_H_ #include #ifdef WIN32 #include #include #include #endif #include #ifdef HAVE_SYS_SOCKET_H #include #endif #ifdef HAVE_POLL_H #include #endif #ifdef HAVE_SYS_POLL_H #include #endif #ifdef HAVE_SYS_UIO_H #include #endif #ifdef HAVE_SYS_UN_H #include #endif #ifdef HAVE_NETINET_IN_H #include #endif #ifdef HAVE_NETINET_TCP_H #include #endif #ifdef HAVE_ARPA_INET_H #include #endif #ifndef INADDR_NONE /** Compat: Some systems (Solaris) does not define INADDR_NONE */ #define INADDR_NONE ((unsigned long) -1) #endif /** * Usual socket setup. * * - Disallow SIGPIPE * - Set close-on-exec flag * - Call \ref socket_set_nonblocking() with given flag */ bool socket_setup(int sock, bool non_block); /** * Flip sockets non-blocking flag */ bool socket_set_nonblocking(int sock, bool non_block); /** * Set sockets keepalive flags. * * @param fd TCP socket * @param onoff Whether to set keepalive on or off. * @param keepidle How long the socket must be idle before keepalive packets are sent * @param keepintvl How big period between consecutive keepalive packets. * @param keepcnt How many keepalive packets to send before considering socket dead. */ bool socket_set_keepalive(int fd, int onoff, int keepidle, int keepintvl, int keepcnt); /** * Convert struct sockaddr to stirng. * * Supports: ipv4, ipv5, unix sockets. 
*/ const char *sa2str(const struct sockaddr *sa, char *buf, int buflen); #ifndef HAVE_INET_NTOP #define inet_ntop(a,b,c,d) usual_inet_ntop(a,b,c,d) /** Compat: inet_ntop() */ const char *inet_ntop(int af, const void *src, char *dst, int cnt); #endif #ifndef HAVE_INET_PTON #define inet_pton(a,b,c) usual_inet_pton(a,b,c) /** Compat: inet_pton() */ int inet_pton(int af, const char *src, void *dst); #endif #ifndef HAVE_GETPEEREID #define getpeereid(a,b,c) compat_getpeereid(a,b,c) /** Get user id of UNIX socket peer */ int getpeereid(int fd, uid_t *uid_p, gid_t *gid_p); #endif #define getpeercreds(a,b,c,d) usual_getpeercreds(a,b,c,d) /** Get info of UNIX socket peer */ int getpeercreds(int fd, uid_t *uid_p, gid_t *gid_p, pid_t *pid_p); #if !defined(HAVE_POLL) #define POLLIN (1 << 0) #define POLLOUT (1 << 1) #define POLLHUP (1 << 2) #define POLLPRI (1 << 3) #define POLLNVAL (1 << 4) #define POLLERR (1 << 5) #define poll(a,b,c) compat_poll(a,b,c) struct pollfd { int fd; short events; short revents; }; typedef unsigned long nfds_t; /** Compat: select-based poll() */ int poll(struct pollfd *fds, nfds_t nfds, int timeout_ms); #endif #ifdef WIN32 #define socketpair(a,b,c,d) win32_socketpair(a,b,c,d) /** Compat: socketpair() for win32 */ int socketpair(int d, int typ, int proto, int sv[2]); #endif #endif skytools-3.2.6/lib/usual/pthread.h0000644000000000000000000000343612166266754014023 0ustar /* * Copyright (c) 2007-2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** @file * * Pthreads compat for win32. */ #ifndef _USUAL_PTHREAD_H_ #define _USUAL_PTHREAD_H_ #include #ifdef HAVE_PTHREAD_H #include #else #ifdef WIN32 #define pthread_create(a,b,c,d) compat_pthread_create(a,b,c,d) #define pthread_mutex_init(a,b) compat_pthread_mutex_init(a,b) #define pthread_mutex_destroy(a) compat_pthread_mutex_destroy(a) #define pthread_mutex_lock(a) compat_pthread_mutex_lock(a) #define pthread_mutex_unlock(a) compat_pthread_mutex_unlock(a) #define pthread_join(a,b) compat_pthread_join(a,b) typedef HANDLE pthread_t; typedef HANDLE pthread_mutex_t; typedef int pthread_attr_t; int pthread_create(pthread_t *t, pthread_attr_t *attr, void *(*fn)(void *), void *arg); int pthread_mutex_init(pthread_mutex_t *lock, void *unused); int pthread_mutex_destroy(pthread_mutex_t *lock); int pthread_mutex_lock(pthread_mutex_t *lock); int pthread_mutex_unlock(pthread_mutex_t *lock); int pthread_join(pthread_t *t, void **ret); #endif /* WIN32 */ #endif /* HAVE_PTHREAD_H */ #endif skytools-3.2.6/lib/usual/pgutil.h0000644000000000000000000000276612166266754013705 0ustar /* * libusual - Utility library for C * * Copyright (c) 2007-2009 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** * @file * * Utility functions for PostgreSQL data formats. */ #ifndef _USUAL_PGUTIL_H_ #define _USUAL_PGUTIL_H_ #include /** Check if string is reserver word for PostgreSQL. */ bool pg_is_reserved_word(const char *str); /** Quote value as string for PostgreSQL */ bool pg_quote_literal(char *_dst, const char *_src, int dstlen); /** Quote value as ident for PostgreSQL */ bool pg_quote_ident(char *_dst, const char *_src, int dstlen); /** Quote fully-qualified ident for PostgreSQL */ bool pg_quote_fqident(char *_dst, const char *_src, int dstlen); /** Parse PostgreSQL array. */ struct StrList *pg_parse_array(const char *pgarr, CxMem *cx); #endif skytools-3.2.6/lib/usual/getopt.h0000644000000000000000000000670012166266754013673 0ustar /* * Copyright (c) 2000 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Dieter Baron and Thomas Klausner. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. 
AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /** * @file * getopt compat. * * This module provides getopt() and getopt_long(). */ #ifndef _USUAL_GETOPT_H_ #define _USUAL_GETOPT_H_ #include #ifndef NEED_USUAL_GETOPT #if !defined(HAVE_GETOPT_H) || !defined(HAVE_GETOPT) || !defined(HAVE_GETOPT_LONG) #define NEED_USUAL_GETOPT #endif #endif #ifndef NEED_USUAL_GETOPT /* Use system getopt */ #include #else /* NEED_USUAL_GETOPT */ /* avoid name collision */ #define optarg usual_optarg #define opterr usual_opterr #define optind usual_optind #define optopt usual_optopt #define getopt(a,b,c) usual_getopt(a,b,c) #define getopt_long(a,b,c,d,e) usual_getopt_long(a,b,c,d,e) /** argument to current option, or NULL if it has none */ extern char *optarg; /** Current position in arg string. Starts from 1. Setting to 0 resets state. */ extern int optind; /** whether getopt() should print error messages on problems. Default: 1. 
*/ extern int opterr; /** Option char which caused error */ extern int optopt; /** long option takes no argument */ #define no_argument 0 /** long option requires argument */ #define required_argument 1 /** long option has optional argument */ #define optional_argument 2 /** Long option description */ struct option { /** name of long option */ const char *name; /** * whether option takes an argument. * One of no_argument, required_argument, and optional_argument. */ int has_arg; /** if not NULL, set *flag to val when option found */ int *flag; /** if flag not NULL, value to set *flag to; else return value */ int val; }; /** Compat: getopt */ int getopt(int argc, char *argv[], const char *options); /** Compat: getopt_long */ int getopt_long(int argc, char *argv[], const char *options, const struct option *longopts, int *longindex); /** Compat: getopt_long_only */ int getopt_long_only(int nargc, char *argv[], const char *options, const struct option *long_options, int *idx); #endif /* NEED_USUAL_GETOPT */ #endif /* !_USUAL_GETOPT_H_ */ skytools-3.2.6/lib/usual/base.h0000644000000000000000000001475012166266754013307 0ustar /** @file * Basic C environment. */ /* * Copyright (c) 2007-2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #ifndef _USUAL_BASE_H_ #define _USUAL_BASE_H_ #ifdef USUAL_TEST_CONFIG #include "test_config.h" #elif defined(_MSC_VER) #include #else #include #endif /* solaris is broken otherwise */ #if defined(__sun) #define _XPG4_2 #define __EXTENSIONS__ #endif #include #ifdef HAVE_SYS_PARAM_H #include #endif #include #include #ifdef HAVE_INTTYPES_H #include #endif #include #include #include #include #ifdef HAVE_UNISTD_H #include #endif #ifdef HAVE_STDBOOL_H #include #else /* we really want bool type */ typedef enum { true=1, false=0 } bool; #endif #ifdef WIN32 #include #define DLLEXPORT __declspec(dllexport) #define DLLIMPORT __declspec(dllimport) #else #define DLLEXPORT #define DLLIMPORT #endif /** give offset of a field inside struct */ #ifndef offsetof #define offsetof(type, field) ((unsigned long)&(((type *)0)->field)) #endif /** given pointer to field inside struct, return pointer to struct */ #ifndef container_of #define container_of(ptr, type, field) ((type *)((char *)(ptr) - offsetof(type, field))) #endif /** get alignment requirement for a type */ #ifndef alignof #define alignof(type) offsetof(struct { char c; type t; }, t) #endif /** power-of-2 alignment */ #ifndef CUSTOM_ALIGN #define CUSTOM_ALIGN(x, a) (((uintptr_t)(x) + (uintptr_t)(a) - 1) & ~((uintptr_t)(a) - 1)) #endif /** preferred alignment */ #ifndef ALIGN #define ALIGN(x) CUSTOM_ALIGN(x, sizeof(long)) #endif /** number of elements in array */ #define ARRAY_NELEM(a) (sizeof(a) / sizeof((a)[0])) /* how to specify array with unknown length */ #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) #define FLEX_ARRAY #elif defined(__GNUC__) && (__GNUC__ >= 3) #define FLEX_ARRAY #else #define FLEX_ARRAY 1 #endif /** Make string token from C expression */ #define STR(x) _STR_(x) #define _STR_(x) #x /** Make single C token from 2 separate tokens */ #define CONCAT(a, b) _CONCAT_(a, b) #define _CONCAT_(a, b) a ## b /** Make single C token from 3 separate tokens */ #define CONCAT3(a, b, c) _CONCAT3_(a, b, 
c) #define _CONCAT3_(a, b, c) a ## b ## c /** Make single C token from 4 separate tokens */ #define CONCAT4(a, b, c, d) _CONCAT4_(a, b, c, d) #define _CONCAT4_(a, b, c, d) a ## b ## c ## d /** * @name Compiler attributes. */ /* Compiler detection for internal usage. */ #define _COMPILER_GNUC(maj,min) (defined(__GNUC__) && \ ((__GNUC__ > (maj)) || (__GNUC__ == (maj) && __GNUC_MINOR__ >= (min)))) #define _COMPILER_CLANG(maj,min) (defined(__clang__) && \ ((__clang_major__ > (maj)) || (__clang_major__ == (maj) && __clang_minor__ >= (min)))) #define _COMPILER_MSC(ver) (defined(_MSC_VER) && (_MSC_VER >= (ver))) #define _COMPILER_ICC(ver) (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= (ver))) /** Disable padding for structure */ #ifndef _MSC_VER #define _PACKED __attribute__((packed)) #endif /* * Make sure __func__ works. */ #ifndef HAVE_FUNCNAME__FUNC #define __func__ __FUNCTION__ #endif /* * make compiler do something useful */ #ifndef _MUSTCHECK #if defined(__GNUC__) && (__GNUC__ >= 4) /** Show warning if function result is not used */ #define _MUSTCHECK __attribute__((warn_unused_result)) /** Show warning if used */ #define _DEPRECATED __attribute__((deprecated)) /** Check printf-style format and arg sanity */ #define _PRINTF(fmtpos, argpos) __attribute__((format(printf, fmtpos, argpos))) /** Function returns new pointer */ #define _MALLOC __attribute__((malloc)) /** Disable 'unused' warning for function/argument. */ #define _UNUSED __attribute__((unused)) /** Do not inline function. 
*/ #define _NOINLINE __attribute__((noinline)) /** Indicates that function never returns */ #define _NORETURN __attribute__((noreturn)) /* compiler hints - those do not seem to work well */ #define unlikely(x) __builtin_expect(!!(x), 0) #define likely(x) __builtin_expect(!!(x), 1) #else /* non gcc */ #define _MUSTCHECK #define _DEPRECATED #define _PRINTF(x,y) #define _MALLOC #define _UNUSED #define _NOINLINE #define _NORETURN #define unlikely(x) x #define likely(x) x #endif #endif /* @} */ /** * Compile-time assert. * * Expression must be evaluatable at compile time. * If false, stop compilation with message. * * It can be used in either global or function scope. */ #ifndef static_assert #if _COMPILER_GNUC(4,6) || _COMPILER_CLANG(3,0) || _COMPILER_MSC(1600) /* Version for new compilers */ #define static_assert(expr, msg) _Static_assert(expr, msg) #else /* Version for old compilers */ #define static_assert(expr, msg) enum { CONCAT4(static_assert_failure_, __LINE__, _, __COUNTER__) = 1/(1 != (1 + (expr))) } #endif #endif /* !static_assert */ /** assert() that uses module */ #ifndef Assert #ifdef CASSERT void log_fatal(const char *file, int line, const char *func, bool show_perror, void *ctx, const char *s, ...) _PRINTF(6, 7); #define Assert(e) \ do { \ if (unlikely(!(e))) { \ log_fatal(__FILE__, __LINE__, __func__, false, NULL, \ "Assert(%s) failed", #e); \ abort(); \ } \ } while (0) #else #define Assert(e) #endif #endif /* Fix posix bug by accepting const pointer. 
*/ static inline void _const_free(const void *p) { free((void *)p); } /** Compat: make free() accept const pointer */ #define free(x) _const_free(x) /** Zeroing malloc */ _MUSTCHECK static inline void *zmalloc(size_t len) { return calloc(1, len); } #ifndef HAVE_POSIX_MEMALIGN #define posix_memalign(a,b,c) usual_memalign(a,b,c) /** Compat: posix_memalign() */ int posix_memalign(void **ptr_p, size_t align, size_t len); #endif #endif skytools-3.2.6/lib/usual/pgutil_kwlookup.h0000644000000000000000000005374312166266754015641 0ustar /* ANSI-C code produced by gperf version 3.0.3 */ /* Command-line: gperf -m5 usual/pgutil_kwlookup.g */ /* Computed positions: -k'1-2,6,9,$' */ #if !((' ' == 32) && ('!' == 33) && ('"' == 34) && ('#' == 35) \ && ('%' == 37) && ('&' == 38) && ('\'' == 39) && ('(' == 40) \ && (')' == 41) && ('*' == 42) && ('+' == 43) && (',' == 44) \ && ('-' == 45) && ('.' == 46) && ('/' == 47) && ('0' == 48) \ && ('1' == 49) && ('2' == 50) && ('3' == 51) && ('4' == 52) \ && ('5' == 53) && ('6' == 54) && ('7' == 55) && ('8' == 56) \ && ('9' == 57) && (':' == 58) && (';' == 59) && ('<' == 60) \ && ('=' == 61) && ('>' == 62) && ('?' 
== 63) && ('A' == 65) \ && ('B' == 66) && ('C' == 67) && ('D' == 68) && ('E' == 69) \ && ('F' == 70) && ('G' == 71) && ('H' == 72) && ('I' == 73) \ && ('J' == 74) && ('K' == 75) && ('L' == 76) && ('M' == 77) \ && ('N' == 78) && ('O' == 79) && ('P' == 80) && ('Q' == 81) \ && ('R' == 82) && ('S' == 83) && ('T' == 84) && ('U' == 85) \ && ('V' == 86) && ('W' == 87) && ('X' == 88) && ('Y' == 89) \ && ('Z' == 90) && ('[' == 91) && ('\\' == 92) && (']' == 93) \ && ('^' == 94) && ('_' == 95) && ('a' == 97) && ('b' == 98) \ && ('c' == 99) && ('d' == 100) && ('e' == 101) && ('f' == 102) \ && ('g' == 103) && ('h' == 104) && ('i' == 105) && ('j' == 106) \ && ('k' == 107) && ('l' == 108) && ('m' == 109) && ('n' == 110) \ && ('o' == 111) && ('p' == 112) && ('q' == 113) && ('r' == 114) \ && ('s' == 115) && ('t' == 116) && ('u' == 117) && ('v' == 118) \ && ('w' == 119) && ('x' == 120) && ('y' == 121) && ('z' == 122) \ && ('{' == 123) && ('|' == 124) && ('}' == 125) && ('~' == 126)) /* The character set is not based on ISO-646. */ #error "gperf generated tables don't work with this execution character set. Please report a bug to ." 
#endif /* maximum key range = 296, duplicates = 0 */ #ifdef __GNUC__ __inline #else #ifdef __cplusplus inline #endif #endif static unsigned int pg_keyword_lookup_hash (register const char *str, register unsigned int len) { static const unsigned short asso_values[] = { 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 38, 125, 31, 64, 10, 96, 60, 125, 26, 7, 5, 13, 63, 10, 12, 70, 312, 5, 19, 3, 71, 131, 65, 50, 77, 3, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312, 312 }; register int hval = len; switch (hval) { default: hval += asso_values[(unsigned char)str[8]]; /*FALLTHROUGH*/ case 8: case 7: case 6: hval += asso_values[(unsigned char)str[5]]; /*FALLTHROUGH*/ case 5: case 4: case 3: case 2: hval += asso_values[(unsigned char)str[1]]; /*FALLTHROUGH*/ case 1: hval += asso_values[(unsigned char)str[0]]; break; } return hval + asso_values[(unsigned char)str[len - 1]]; } #ifdef __GNUC__ __inline #ifdef __GNUC_STDC_INLINE__ __attribute__ 
((__gnu_inline__)) #endif #endif const char * pg_keyword_lookup_real (register const char *str, register unsigned int len) { enum { TOTAL_KEYWORDS = 148, MIN_WORD_LENGTH = 2, MAX_WORD_LENGTH = 17, MIN_HASH_VALUE = 16, MAX_HASH_VALUE = 311 }; struct pgkw_t { char pgkw_str16[sizeof("treat")]; char pgkw_str22[sizeof("true")]; char pgkw_str24[sizeof("or")]; char pgkw_str27[sizeof("order")]; char pgkw_str28[sizeof("not")]; char pgkw_str29[sizeof("to")]; char pgkw_str30[sizeof("left")]; char pgkw_str31[sizeof("least")]; char pgkw_str32[sizeof("real")]; char pgkw_str33[sizeof("join")]; char pgkw_str34[sizeof("on")]; char pgkw_str36[sizeof("none")]; char pgkw_str37[sizeof("else")]; char pgkw_str39[sizeof("right")]; char pgkw_str41[sizeof("select")]; char pgkw_str42[sizeof("int")]; char pgkw_str43[sizeof("time")]; char pgkw_str44[sizeof("inout")]; char pgkw_str45[sizeof("some")]; char pgkw_str46[sizeof("inner")]; char pgkw_str47[sizeof("limit")]; char pgkw_str48[sizeof("in")]; char pgkw_str51[sizeof("nchar")]; char pgkw_str52[sizeof("into")]; char pgkw_str53[sizeof("like")]; char pgkw_str54[sizeof("ilike")]; char pgkw_str55[sizeof("notnull")]; char pgkw_str56[sizeof("table")]; char pgkw_str57[sizeof("localtime")]; char pgkw_str58[sizeof("integer")]; char pgkw_str60[sizeof("cross")]; char pgkw_str62[sizeof("create")]; char pgkw_str63[sizeof("collate")]; char pgkw_str64[sizeof("references")]; char pgkw_str66[sizeof("is")]; char pgkw_str67[sizeof("all")]; char pgkw_str68[sizeof("analyze")]; char pgkw_str69[sizeof("column")]; char pgkw_str70[sizeof("intersect")]; char pgkw_str71[sizeof("constraint")]; char pgkw_str72[sizeof("except")]; char pgkw_str73[sizeof("grant")]; char pgkw_str75[sizeof("trim")]; char pgkw_str76[sizeof("cast")]; char pgkw_str77[sizeof("isnull")]; char pgkw_str78[sizeof("as")]; char pgkw_str79[sizeof("national")]; char pgkw_str80[sizeof("coalesce")]; char pgkw_str83[sizeof("case")]; char pgkw_str84[sizeof("analyse")]; char pgkw_str85[sizeof("row")]; char 
pgkw_str86[sizeof("greatest")]; char pgkw_str87[sizeof("end")]; char pgkw_str88[sizeof("new")]; char pgkw_str89[sizeof("out")]; char pgkw_str90[sizeof("do")]; char pgkw_str91[sizeof("asc")]; char pgkw_str92[sizeof("old")]; char pgkw_str93[sizeof("outer")]; char pgkw_str95[sizeof("similar")]; char pgkw_str96[sizeof("union")]; char pgkw_str97[sizeof("default")]; char pgkw_str98[sizeof("null")]; char pgkw_str99[sizeof("user")]; char pgkw_str100[sizeof("leading")]; char pgkw_str101[sizeof("extract")]; char pgkw_str102[sizeof("trailing")]; char pgkw_str103[sizeof("only")]; char pgkw_str104[sizeof("exists")]; char pgkw_str106[sizeof("natural")]; char pgkw_str107[sizeof("unique")]; char pgkw_str108[sizeof("dec")]; char pgkw_str109[sizeof("desc")]; char pgkw_str111[sizeof("distinct")]; char pgkw_str112[sizeof("deferrable")]; char pgkw_str115[sizeof("and")]; char pgkw_str116[sizeof("for")]; char pgkw_str117[sizeof("float")]; char pgkw_str119[sizeof("smallint")]; char pgkw_str120[sizeof("offset")]; char pgkw_str122[sizeof("localtimestamp")]; char pgkw_str123[sizeof("precision")]; char pgkw_str125[sizeof("array")]; char pgkw_str126[sizeof("position")]; char pgkw_str127[sizeof("freeze")]; char pgkw_str128[sizeof("any")]; char pgkw_str129[sizeof("session_user")]; char pgkw_str130[sizeof("setof")]; char pgkw_str132[sizeof("decimal")]; char pgkw_str133[sizeof("xmlforest")]; char pgkw_str134[sizeof("asymmetric")]; char pgkw_str135[sizeof("xmlroot")]; char pgkw_str136[sizeof("xmlparse")]; char pgkw_str137[sizeof("current_time")]; char pgkw_str138[sizeof("xmlconcat")]; char pgkw_str139[sizeof("current_role")]; char pgkw_str140[sizeof("group")]; char pgkw_str142[sizeof("then")]; char pgkw_str144[sizeof("xmlpi")]; char pgkw_str145[sizeof("numeric")]; char pgkw_str146[sizeof("xmlelement")]; char pgkw_str147[sizeof("concurrently")]; char pgkw_str149[sizeof("false")]; char pgkw_str152[sizeof("over")]; char pgkw_str153[sizeof("xmlserialize")]; char pgkw_str154[sizeof("returning")]; char 
pgkw_str155[sizeof("using")]; char pgkw_str157[sizeof("bit")]; char pgkw_str160[sizeof("placing")]; char pgkw_str162[sizeof("between")]; char pgkw_str163[sizeof("bigint")]; char pgkw_str164[sizeof("primary")]; char pgkw_str165[sizeof("char")]; char pgkw_str166[sizeof("check")]; char pgkw_str168[sizeof("from")]; char pgkw_str170[sizeof("symmetric")]; char pgkw_str175[sizeof("authorization")]; char pgkw_str177[sizeof("verbose")]; char pgkw_str181[sizeof("timestamp")]; char pgkw_str183[sizeof("current_schema")]; char pgkw_str184[sizeof("full")]; char pgkw_str185[sizeof("foreign")]; char pgkw_str186[sizeof("xmlexists")]; char pgkw_str188[sizeof("interval")]; char pgkw_str192[sizeof("boolean")]; char pgkw_str198[sizeof("current_date")]; char pgkw_str200[sizeof("current_user")]; char pgkw_str202[sizeof("current_timestamp")]; char pgkw_str204[sizeof("when")]; char pgkw_str205[sizeof("where")]; char pgkw_str206[sizeof("character")]; char pgkw_str207[sizeof("off")]; char pgkw_str208[sizeof("overlaps")]; char pgkw_str213[sizeof("values")]; char pgkw_str218[sizeof("current_catalog")]; char pgkw_str219[sizeof("varchar")]; char pgkw_str220[sizeof("with")]; char pgkw_str224[sizeof("substring")]; char pgkw_str227[sizeof("window")]; char pgkw_str236[sizeof("fetch")]; char pgkw_str237[sizeof("initially")]; char pgkw_str265[sizeof("overlay")]; char pgkw_str266[sizeof("both")]; char pgkw_str272[sizeof("variadic")]; char pgkw_str273[sizeof("xmlattributes")]; char pgkw_str279[sizeof("nullif")]; char pgkw_str289[sizeof("having")]; char pgkw_str311[sizeof("binary")]; }; static const struct pgkw_t pgkw_contents = { "treat", "true", "or", "order", "not", "to", "left", "least", "real", "join", "on", "none", "else", "right", "select", "int", "time", "inout", "some", "inner", "limit", "in", "nchar", "into", "like", "ilike", "notnull", "table", "localtime", "integer", "cross", "create", "collate", "references", "is", "all", "analyze", "column", "intersect", "constraint", "except", "grant", 
"trim", "cast", "isnull", "as", "national", "coalesce", "case", "analyse", "row", "greatest", "end", "new", "out", "do", "asc", "old", "outer", "similar", "union", "default", "null", "user", "leading", "extract", "trailing", "only", "exists", "natural", "unique", "dec", "desc", "distinct", "deferrable", "and", "for", "float", "smallint", "offset", "localtimestamp", "precision", "array", "position", "freeze", "any", "session_user", "setof", "decimal", "xmlforest", "asymmetric", "xmlroot", "xmlparse", "current_time", "xmlconcat", "current_role", "group", "then", "xmlpi", "numeric", "xmlelement", "concurrently", "false", "over", "xmlserialize", "returning", "using", "bit", "placing", "between", "bigint", "primary", "char", "check", "from", "symmetric", "authorization", "verbose", "timestamp", "current_schema", "full", "foreign", "xmlexists", "interval", "boolean", "current_date", "current_user", "current_timestamp", "when", "where", "character", "off", "overlaps", "values", "current_catalog", "varchar", "with", "substring", "window", "fetch", "initially", "overlay", "both", "variadic", "xmlattributes", "nullif", "having", "binary" }; #define pgkw ((const char *) &pgkw_contents) static const int wordlist[] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str16, -1, -1, -1, -1, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str22, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str24, -1, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str27, (int)(long)&((struct pgkw_t *)0)->pgkw_str28, (int)(long)&((struct pgkw_t *)0)->pgkw_str29, (int)(long)&((struct pgkw_t *)0)->pgkw_str30, (int)(long)&((struct pgkw_t *)0)->pgkw_str31, (int)(long)&((struct pgkw_t *)0)->pgkw_str32, (int)(long)&((struct pgkw_t *)0)->pgkw_str33, (int)(long)&((struct pgkw_t *)0)->pgkw_str34, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str36, (int)(long)&((struct pgkw_t *)0)->pgkw_str37, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str39, -1, (int)(long)&((struct 
pgkw_t *)0)->pgkw_str41, (int)(long)&((struct pgkw_t *)0)->pgkw_str42, (int)(long)&((struct pgkw_t *)0)->pgkw_str43, (int)(long)&((struct pgkw_t *)0)->pgkw_str44, (int)(long)&((struct pgkw_t *)0)->pgkw_str45, (int)(long)&((struct pgkw_t *)0)->pgkw_str46, (int)(long)&((struct pgkw_t *)0)->pgkw_str47, (int)(long)&((struct pgkw_t *)0)->pgkw_str48, -1, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str51, (int)(long)&((struct pgkw_t *)0)->pgkw_str52, (int)(long)&((struct pgkw_t *)0)->pgkw_str53, (int)(long)&((struct pgkw_t *)0)->pgkw_str54, (int)(long)&((struct pgkw_t *)0)->pgkw_str55, (int)(long)&((struct pgkw_t *)0)->pgkw_str56, (int)(long)&((struct pgkw_t *)0)->pgkw_str57, (int)(long)&((struct pgkw_t *)0)->pgkw_str58, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str60, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str62, (int)(long)&((struct pgkw_t *)0)->pgkw_str63, (int)(long)&((struct pgkw_t *)0)->pgkw_str64, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str66, (int)(long)&((struct pgkw_t *)0)->pgkw_str67, (int)(long)&((struct pgkw_t *)0)->pgkw_str68, (int)(long)&((struct pgkw_t *)0)->pgkw_str69, (int)(long)&((struct pgkw_t *)0)->pgkw_str70, (int)(long)&((struct pgkw_t *)0)->pgkw_str71, (int)(long)&((struct pgkw_t *)0)->pgkw_str72, (int)(long)&((struct pgkw_t *)0)->pgkw_str73, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str75, (int)(long)&((struct pgkw_t *)0)->pgkw_str76, (int)(long)&((struct pgkw_t *)0)->pgkw_str77, (int)(long)&((struct pgkw_t *)0)->pgkw_str78, (int)(long)&((struct pgkw_t *)0)->pgkw_str79, (int)(long)&((struct pgkw_t *)0)->pgkw_str80, -1, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str83, (int)(long)&((struct pgkw_t *)0)->pgkw_str84, (int)(long)&((struct pgkw_t *)0)->pgkw_str85, (int)(long)&((struct pgkw_t *)0)->pgkw_str86, (int)(long)&((struct pgkw_t *)0)->pgkw_str87, (int)(long)&((struct pgkw_t *)0)->pgkw_str88, (int)(long)&((struct pgkw_t *)0)->pgkw_str89, (int)(long)&((struct pgkw_t *)0)->pgkw_str90, (int)(long)&((struct pgkw_t *)0)->pgkw_str91, 
(int)(long)&((struct pgkw_t *)0)->pgkw_str92, (int)(long)&((struct pgkw_t *)0)->pgkw_str93, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str95, (int)(long)&((struct pgkw_t *)0)->pgkw_str96, (int)(long)&((struct pgkw_t *)0)->pgkw_str97, (int)(long)&((struct pgkw_t *)0)->pgkw_str98, (int)(long)&((struct pgkw_t *)0)->pgkw_str99, (int)(long)&((struct pgkw_t *)0)->pgkw_str100, (int)(long)&((struct pgkw_t *)0)->pgkw_str101, (int)(long)&((struct pgkw_t *)0)->pgkw_str102, (int)(long)&((struct pgkw_t *)0)->pgkw_str103, (int)(long)&((struct pgkw_t *)0)->pgkw_str104, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str106, (int)(long)&((struct pgkw_t *)0)->pgkw_str107, (int)(long)&((struct pgkw_t *)0)->pgkw_str108, (int)(long)&((struct pgkw_t *)0)->pgkw_str109, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str111, (int)(long)&((struct pgkw_t *)0)->pgkw_str112, -1, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str115, (int)(long)&((struct pgkw_t *)0)->pgkw_str116, (int)(long)&((struct pgkw_t *)0)->pgkw_str117, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str119, (int)(long)&((struct pgkw_t *)0)->pgkw_str120, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str122, (int)(long)&((struct pgkw_t *)0)->pgkw_str123, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str125, (int)(long)&((struct pgkw_t *)0)->pgkw_str126, (int)(long)&((struct pgkw_t *)0)->pgkw_str127, (int)(long)&((struct pgkw_t *)0)->pgkw_str128, (int)(long)&((struct pgkw_t *)0)->pgkw_str129, (int)(long)&((struct pgkw_t *)0)->pgkw_str130, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str132, (int)(long)&((struct pgkw_t *)0)->pgkw_str133, (int)(long)&((struct pgkw_t *)0)->pgkw_str134, (int)(long)&((struct pgkw_t *)0)->pgkw_str135, (int)(long)&((struct pgkw_t *)0)->pgkw_str136, (int)(long)&((struct pgkw_t *)0)->pgkw_str137, (int)(long)&((struct pgkw_t *)0)->pgkw_str138, (int)(long)&((struct pgkw_t *)0)->pgkw_str139, (int)(long)&((struct pgkw_t *)0)->pgkw_str140, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str142, -1, (int)(long)&((struct pgkw_t 
*)0)->pgkw_str144, (int)(long)&((struct pgkw_t *)0)->pgkw_str145, (int)(long)&((struct pgkw_t *)0)->pgkw_str146, (int)(long)&((struct pgkw_t *)0)->pgkw_str147, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str149, -1, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str152, (int)(long)&((struct pgkw_t *)0)->pgkw_str153, (int)(long)&((struct pgkw_t *)0)->pgkw_str154, (int)(long)&((struct pgkw_t *)0)->pgkw_str155, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str157, -1, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str160, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str162, (int)(long)&((struct pgkw_t *)0)->pgkw_str163, (int)(long)&((struct pgkw_t *)0)->pgkw_str164, (int)(long)&((struct pgkw_t *)0)->pgkw_str165, (int)(long)&((struct pgkw_t *)0)->pgkw_str166, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str168, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str170, -1, -1, -1, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str175, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str177, -1, -1, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str181, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str183, (int)(long)&((struct pgkw_t *)0)->pgkw_str184, (int)(long)&((struct pgkw_t *)0)->pgkw_str185, (int)(long)&((struct pgkw_t *)0)->pgkw_str186, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str188, -1, -1, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str192, -1, -1, -1, -1, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str198, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str200, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str202, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str204, (int)(long)&((struct pgkw_t *)0)->pgkw_str205, (int)(long)&((struct pgkw_t *)0)->pgkw_str206, (int)(long)&((struct pgkw_t *)0)->pgkw_str207, (int)(long)&((struct pgkw_t *)0)->pgkw_str208, -1, -1, -1, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str213, -1, -1, -1, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str218, (int)(long)&((struct pgkw_t *)0)->pgkw_str219, (int)(long)&((struct pgkw_t *)0)->pgkw_str220, -1, -1, -1, (int)(long)&((struct pgkw_t 
*)0)->pgkw_str224, -1, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str227, -1, -1, -1, -1, -1, -1, -1, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str236, (int)(long)&((struct pgkw_t *)0)->pgkw_str237, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str265, (int)(long)&((struct pgkw_t *)0)->pgkw_str266, -1, -1, -1, -1, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str272, (int)(long)&((struct pgkw_t *)0)->pgkw_str273, -1, -1, -1, -1, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str279, -1, -1, -1, -1, -1, -1, -1, -1, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str289, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, (int)(long)&((struct pgkw_t *)0)->pgkw_str311 }; if (len <= MAX_WORD_LENGTH && len >= MIN_WORD_LENGTH) { register int key = pg_keyword_lookup_hash (str, len); if (key <= MAX_HASH_VALUE && key >= 0) { register int o = wordlist[key]; if (o >= 0) { register const char *s = o + pgkw; if (*str == *s && !strcmp (str + 1, s + 1)) return s; } } } return 0; } skytools-3.2.6/lib/usual/statlist.h0000644000000000000000000001011112166266754014227 0ustar /* * Wrapper for list.h that keeps track of number of items. * * Copyright (c) 2007-2009 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** * @file * * Circular list that keep track of stats about the list. * * Currenly only count of abjects currently in list * is kept track of. The plan was to track more, * like max, but it was not useful enough. */ #ifndef _USUAL_STATLIST_H_ #define _USUAL_STATLIST_H_ #include /** * Header structure for StatList. */ struct StatList { /** Actual list head */ struct List head; /** Count of objects currently in list */ int cur_count; #ifdef LIST_DEBUG /** List name */ const char *name; #endif }; /** Define and initialize StatList head */ #ifdef LIST_DEBUG #define STATLIST(var) struct StatList var = { {&var.head, &var.head}, 0, #var } #else #define STATLIST(var) struct StatList var = { {&var.head, &var.head}, 0 } #endif /** Add to the start of the list */ static inline void statlist_prepend(struct StatList *list, struct List *item) { list_prepend(&list->head, item); list->cur_count++; } /** Add to the end of the list */ static inline void statlist_append(struct StatList *list, struct List *item) { list_append(&list->head, item); list->cur_count++; } /** Remove element from the list */ static inline void statlist_remove(struct StatList *list, struct List *item) { list_del(item); list->cur_count--; /* Assert(list->cur_count >= 0); */ } /** Initialize StatList head */ static inline void statlist_init(struct StatList *list, const char *name) { list_init(&list->head); list->cur_count = 0; #ifdef LIST_DEBUG list->name = name; #endif } /** return number of elements currently in list */ static inline int statlist_count(const struct StatList *list) { /* Assert(list->cur_count > 0 || list_empty(&list->head)); */ return list->cur_count; } /** remove and 
return first element */ static inline struct List *statlist_pop(struct StatList *list) { struct List *item = list_pop(&list->head); if (item) list->cur_count--; /* Assert(list->cur_count >= 0); */ return item; } /** Return first element */ static inline struct List *statlist_first(const struct StatList *list) { return list_first(&list->head); } /** Return last element */ static inline struct List *statlist_last(const struct StatList *list) { return list_last(&list->head); } /** Is list empty */ static inline bool statlist_empty(const struct StatList *list) { return list_empty(&list->head); } /** Loop over list */ #define statlist_for_each(item, list) list_for_each(item, &((list)->head)) /** Loop over list backwards */ #define statlist_for_each_reverse(item, list) list_for_each_reverse(item, &((list)->head)) /** Loop over list safely, so that elements can be removed during */ #define statlist_for_each_safe(item, list, tmp) list_for_each_safe(item, &((list)->head), tmp) /** Loop over list backwards safely, so that elements can be removed during */ #define statlist_for_each_reverse_safe(item, list, tmp) list_for_each_reverse_safe(item, &((list)->head), tmp) /** Put intem before another */ static inline void statlist_put_before(struct StatList *list, struct List *item, struct List *pos) { list_append(pos, item); list->cur_count++; } /** Put item after another */ static inline void statlist_put_after(struct StatList *list, struct List *item, struct List *pos) { list_prepend(pos, item); list->cur_count++; } #endif /* __LIST_H_ */ skytools-3.2.6/lib/usual/safeio.c0000644000000000000000000001162012166266754013627 0ustar /* * libusual - Utility library for C * * Copyright (c) 2007-2009 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. 
* * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* * Wrappers around regular I/O functions (send/recv/read/write) * that survive EINTR and also can log problems. */ #include #include #include #include #include int safe_read(int fd, void *buf, int len) { int res; loop: res = read(fd, buf, len); if (res < 0 && errno == EINTR) goto loop; return res; } int safe_write(int fd, const void *buf, int len) { int res; loop: res = write(fd, buf, len); if (res < 0 && errno == EINTR) goto loop; return res; } int safe_recv(int fd, void *buf, int len, int flags) { int res; char ebuf[128]; loop: res = recv(fd, buf, len, flags); if (res < 0 && errno == EINTR) goto loop; if (res < 0) log_noise("safe_recv(%d, %d) = %s", fd, len, strerror_r(errno, ebuf, sizeof(ebuf))); else if (cf_verbose > 2) log_noise("safe_recv(%d, %d) = %d", fd, len, res); return res; } int safe_send(int fd, const void *buf, int len, int flags) { int res; char ebuf[128]; loop: res = send(fd, buf, len, flags); if (res < 0 && errno == EINTR) goto loop; if (res < 0) log_noise("safe_send(%d, %d) = %s", fd, len, strerror_r(errno, ebuf, sizeof(ebuf))); else if (cf_verbose > 2) log_noise("safe_send(%d, %d) = %d", fd, len, res); return res; } int safe_close(int fd) { int res; #ifndef WIN32 /* * POSIX says close() can return EINTR but fd state is "undefined" * later. Seems Linux and BSDs close the fd anyway and EINTR is * simply informative. Thus retry is dangerous. 
*/ res = close(fd); #else /* * Seems on windows it can returns proper EINTR but only when * WSACancelBlockingCall() is called. As we don't do it, * ignore EINTR on win32 too. */ res = closesocket(fd); #endif if (res < 0) { char ebuf[128]; log_warning("safe_close(%d) = %s", fd, strerror_r(errno, ebuf, sizeof(ebuf))); } else if (cf_verbose > 2) { log_noise("safe_close(%d) = %d", fd, res); } /* ignore EINTR */ if (res < 0 && errno == EINTR) return 0; return res; } int safe_recvmsg(int fd, struct msghdr *msg, int flags) { int res; char ebuf[128]; loop: res = recvmsg(fd, msg, flags); if (res < 0 && errno == EINTR) goto loop; if (res < 0) log_warning("safe_recvmsg(%d, msg, %d) = %s", fd, flags, strerror_r(errno, ebuf, sizeof(ebuf))); else if (cf_verbose > 2) log_noise("safe_recvmsg(%d, msg, %d) = %d", fd, flags, res); return res; } int safe_sendmsg(int fd, const struct msghdr *msg, int flags) { int res; int msgerr_count = 0; char ebuf[128]; loop: res = sendmsg(fd, msg, flags); if (res < 0 && errno == EINTR) goto loop; if (res < 0) { log_warning("safe_sendmsg(%d, msg[%d,%d], %d) = %s", fd, (int)msg->msg_iov[0].iov_len, (int)msg->msg_controllen, flags, strerror_r(errno, ebuf, sizeof(ebuf))); /* with ancillary data on blocking socket OSX returns * EMSGSIZE instead of blocking. 
try to solve it by waiting */ if (errno == EMSGSIZE && msgerr_count < 20) { struct timeval tv = {1, 0}; log_warning("trying to sleep a bit"); select(0, NULL, NULL, NULL, &tv); msgerr_count++; goto loop; } } else if (cf_verbose > 2) log_noise("safe_sendmsg(%d, msg, %d) = %d", fd, flags, res); return res; } int safe_connect(int fd, const struct sockaddr *sa, socklen_t sa_len) { int res; char buf[128]; char ebuf[128]; loop: res = connect(fd, sa, sa_len); if (res < 0 && errno == EINTR) goto loop; if (res < 0 && (errno != EINPROGRESS || cf_verbose > 2)) log_noise("connect(%d, %s) = %s", fd, sa2str(sa, buf, sizeof(buf)), strerror_r(errno, ebuf, sizeof(ebuf))); else if (cf_verbose > 2) log_noise("connect(%d, %s) = %d", fd, sa2str(sa, buf, sizeof(buf)), res); return res; } int safe_accept(int fd, struct sockaddr *sa, socklen_t *sa_len_p) { int res; char buf[128]; char ebuf[128]; loop: res = accept(fd, sa, sa_len_p); if (res < 0 && errno == EINTR) goto loop; if (res < 0) log_noise("safe_accept(%d) = %s", fd, strerror_r(errno, ebuf, sizeof(ebuf))); else if (cf_verbose > 2) log_noise("safe_accept(%d) = %d (%s)", fd, res, sa2str(sa, buf, sizeof(buf))); return res; } skytools-3.2.6/lib/usual/hashing/0000755000000000000000000000000012166266754013636 5ustar skytools-3.2.6/lib/usual/hashing/siphash.c0000644000000000000000000000472512166266754015451 0ustar /* * Copyright (c) 2012 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #define SIP_ROUND1 \ v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32); \ v2 += v3; v3 = rol64(v3, 16); v3 ^= v2; \ v0 += v3; v3 = rol64(v3, 21); v3 ^= v0; \ v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32) #define SIP_ROUND2 SIP_ROUND1; SIP_ROUND1 #define SIP_ROUND4 SIP_ROUND2; SIP_ROUND2 #define SIP_ROUNDS(n) SIP_ROUND ## n #define sip_compress(n) \ do { \ v3 ^= m; \ SIP_ROUNDS(n); \ v0 ^= m; \ } while (0) #define sip_finalize(n) \ do { \ v2 ^= 0xff; \ SIP_ROUNDS(n); \ } while (0) uint64_t siphash24(const void *data, size_t len, uint64_t k0, uint64_t k1) { const uint8_t *s = data; const uint8_t *end = s + len - (len % 8); uint64_t v0 = k0 ^ UINT64_C(0x736f6d6570736575); uint64_t v1 = k1 ^ UINT64_C(0x646f72616e646f6d); uint64_t v2 = k0 ^ UINT64_C(0x6c7967656e657261); uint64_t v3 = k1 ^ UINT64_C(0x7465646279746573); uint64_t m; for (; s < end; s += 8) { m = le64dec(s); sip_compress(2); } m = (uint64_t)len << 56; switch (len & 7) { case 7: m |= (uint64_t)s[6] << 48; case 6: m |= (uint64_t)s[5] << 40; case 5: m |= (uint64_t)s[4] << 32; case 4: m |= (uint64_t)s[3] << 24; case 3: m |= (uint64_t)s[2] << 16; case 2: m |= (uint64_t)s[1] << 8; case 1: m |= (uint64_t)s[0]; break; case 0: break; } sip_compress(2); sip_finalize(4); return (v0 ^ v1 ^ v2 ^ v3); } uint64_t siphash24_secure(const void *data, size_t len) { static bool initialized; static uint64_t k0, k1; if (!initialized) { k0 = ((uint64_t)random() << 32) | random(); k1 = ((uint64_t)random() << 32) | random(); initialized = true; } return siphash24(data, len, k0, k1); } 
skytools-3.2.6/lib/usual/hashing/lookup3.h0000644000000000000000000000040612166266754015403 0ustar /** * @file * * Jenkins' lookup3 non-cryptographic hash. */ #ifndef _USUAL_HASHING_LOOKUP3_H_ #define _USUAL_HASHING_LOOKUP3_H_ #include /** * Calculate 64-bit hash over data */ uint64_t hash_lookup3(const void *data, size_t len); #endif skytools-3.2.6/lib/usual/hashing/crc32.h0000644000000000000000000000176312166266754014732 0ustar /* * Copyright (c) 2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** * @file * * CRC32 checksum. */ #ifndef _USUAL_HASHING_CRC32_H_ #define _USUAL_HASHING_CRC32_H_ #include /** Calculate CRC32 checksum */ uint32_t calc_crc32(const void *data, size_t len, uint32_t init); #endif skytools-3.2.6/lib/usual/hashing/siphash.h0000644000000000000000000000207512166266754015452 0ustar /* * Copyright (c) 2012 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** * @file * * SipHash-2-4 */ #ifndef _USUAL_HASHING_SIPHASH_H_ #define _USUAL_HASHING_SIPHASH_H_ #include /** Calculate SipHash-2-4 checksum */ uint64_t siphash24(const void *data, size_t len, uint64_t k0, uint64_t k1); uint64_t siphash24_secure(const void *data, size_t len); #endif skytools-3.2.6/lib/usual/hashing/lookup3.c0000644000000000000000000000270612166266754015403 0ustar /* * The contents of this file are public domain. * * Based on: lookup3.c, by Bob Jenkins, May 2006, Public Domain. */ /* * Compact version of Bob Jenkins' lookup3.c hash. */ #include #include #define rot(x, k) (((x)<<(k)) | ((x)>>(32-(k)))) #define mix(a, b, c) do { \ a -= c; a ^= rot(c, 4); c += b; \ b -= a; b ^= rot(a, 6); a += c; \ c -= b; c ^= rot(b, 8); b += a; \ a -= c; a ^= rot(c,16); c += b; \ b -= a; b ^= rot(a,19); a += c; \ c -= b; c ^= rot(b, 4); b += a; \ } while (0) #define final(a, b, c) do { \ c ^= b; c -= rot(b,14); \ a ^= c; a -= rot(c,11); \ b ^= a; b -= rot(a,25); \ c ^= b; c -= rot(b,16); \ a ^= c; a -= rot(c, 4); \ b ^= a; b -= rot(a,14); \ c ^= b; c -= rot(b,24); \ } while (0) /* variable length copy of ~6 bytes, avoid call to libc */ static inline void simple_memcpy(void *dst_, const void *src_, size_t len) { const uint8_t *src = src_; uint8_t *dst = dst_; while (len--) *dst++ = *src++; } uint64_t hash_lookup3(const void *data, size_t len) { uint32_t a, b, c; uint32_t buf[3]; const uint8_t *p = data; a = b = c = 0xdeadbeef + len; if (len == 0) goto done; while (len > 12) { memcpy(buf, p, 12); a += buf[0]; b += buf[1]; c += buf[2]; mix(a, b, c); p += 12; len -= 12; } buf[0] = buf[1] = buf[2] = 0; simple_memcpy(buf, p, len); a += buf[0]; b 
+= buf[1]; c += buf[2]; final(a, b, c); done: return ((uint64_t)b << 32) | c; } skytools-3.2.6/lib/usual/hashing/crc32.c0000644000000000000000000001021112166266754014711 0ustar /* * CRC32. * * Copyright (c) 2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include static const uint32_t crc_tab[256] = { 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 
0xF262004E, 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, 0xB3667A2E, 
0xC4614AB8, 0x5D681B02, 0x2A6F2B94, 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D }; static inline uint32_t crc32(uint32_t prev, uint8_t c) { return crc_tab[(prev ^ c) & 0xFF] ^ (prev >> 8); } uint32_t calc_crc32(const void *data, size_t len, uint32_t init) { const uint8_t *p = data; uint32_t crc = init ^ (~0); while (len--) crc = crc32(crc, *p++); return crc ^ (~0); } skytools-3.2.6/lib/usual/cbtree.c0000644000000000000000000001743012166266754013632 0ustar /* * Crit-bit tree / binary radix tree. * * Copyright (c) 2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* * Associates a C string with user pointer (called "obj"). * * Requires it's own internal nodes, thus not embeddable * to user structs. */ #include #include /* * - Childs are either other nodes or user pointers. * User pointers have lowest bit set. * * - All nodes have both childs. * * - Keys are handled as having infinite length, * zero-filled after actual end. */ struct Node { struct Node *child[2]; unsigned bitpos; }; struct CBTree { struct Node *root; cbtree_getkey_func obj_key_cb; cbtree_walker_func obj_free_cb; void *cb_ctx; CxMem *cx; }; #define SAME_KEY 0xFFFFFFFF /* * Low-level operations. 
*/ /* does ptr point to user object or slot */ static inline int is_node(void *ptr) { return ((uintptr_t)(ptr) & 1) == 0; } /* flag pointer as pointing to user object */ static inline void *set_external(const void *obj) { return (void*)((uintptr_t)(obj) | 1); } /* remove flag from user pointer */ static inline void *get_external(void *extval) { return (void*)((uintptr_t)(extval) & (~1)); } /* get specific bit from string */ static inline unsigned get_bit(unsigned bitpos, const unsigned char *key, unsigned klen) { unsigned pos = bitpos / 8; unsigned bit = 7 - (bitpos % 8); return (pos < klen) && (key[pos] & (1 << bit)); } /* use callback to get key for a stored object */ static inline unsigned get_key(struct CBTree *tree, void *obj, const void **key_p) { return tree->obj_key_cb(tree->cb_ctx, obj, key_p); } /* check if object key matches argument */ static inline bool key_matches(struct CBTree *tree, void *obj, const void *key, unsigned klen) { const void *o_key; unsigned o_klen; o_klen = get_key(tree, obj, &o_key); return (o_klen == klen) && (memcmp(key, o_key, klen) == 0); } /* Find first differing bit on 2 strings. */ static unsigned find_crit_bit(const unsigned char *a, unsigned alen, const unsigned char *b, unsigned blen) { unsigned i, c, pos, av, bv; unsigned minlen = (alen > blen) ? blen : alen; unsigned maxlen = (alen > blen) ? alen : blen; /* find differing byte in common data */ for (i = 0; i < minlen; i++) { av = a[i]; bv = b[i]; if (av != bv) goto found; } /* find differing byte when one side is zero-filled */ for (; i < maxlen; i++) { av = (i < alen) ? a[i] : 0; bv = (i < blen) ? 
b[i] : 0; if (av != bv) goto found; } return SAME_KEY; found: /* calculate bits that differ */ c = av ^ bv; /* find the first one */ pos = 8 - fls(c); return i * 8 + pos; } /* * Lookup */ /* walk nodes until external pointer is found */ static void *raw_lookup(struct CBTree *tree, const void *key, unsigned klen) { struct Node *node = tree->root; unsigned bit; while (is_node(node)) { bit = get_bit(node->bitpos, key, klen); node = node->child[bit]; } return get_external(node); } /* actual lookup. returns obj ptr or NULL of not found */ void *cbtree_lookup(struct CBTree *tree, const void *key, unsigned klen) { void *obj; if (!tree->root) return NULL; /* find match based on bits we know about */ obj = raw_lookup(tree, key, klen); /* need to check if the object actually matches */ if (key_matches(tree, obj, key, klen)) return obj; return NULL; } /* * Insertion. */ /* node allocation */ static struct Node *new_node(struct CBTree *tree) { struct Node *node = cx_alloc(tree->cx, sizeof(*node)); memset(node, 0, sizeof(*node)); return node; } /* insert into empty tree */ static bool insert_first(struct CBTree *tree, void *obj) { tree->root = set_external(obj); return true; } /* insert into specific bit-position */ static bool insert_at(struct CBTree *tree, unsigned newbit, const void *key, unsigned klen, void *obj) { /* location of current node/obj pointer under examination */ struct Node **pos = &tree->root; struct Node *node; unsigned bit; while (is_node(*pos) && ((*pos)->bitpos < newbit)) { bit = get_bit((*pos)->bitpos, key, klen); pos = &(*pos)->child[bit]; } bit = get_bit(newbit, key, klen); node = new_node(tree); if (!node) return false; node->bitpos = newbit; node->child[bit] = set_external(obj); node->child[bit ^ 1] = *pos; *pos = node; return true; } /* actual insert: returns true -> insert ok or key found, false -> alloc failure */ bool cbtree_insert(struct CBTree *tree, void *obj) { const void *key, *old_key; unsigned newbit, klen, old_klen; void *old_obj; if 
(!tree->root) return insert_first(tree, obj); /* current key */ klen = get_key(tree, obj, &key); /* nearest key in tree */ old_obj = raw_lookup(tree, key, klen); old_klen = get_key(tree, old_obj, &old_key); /* first differing bit is the target position */ newbit = find_crit_bit(key, klen, old_key, old_klen); if (newbit == SAME_KEY) return true; return insert_at(tree, newbit, key, klen, obj); } /* * Key deletion. */ /* true -> object was found and removed, false -> not found */ bool cbtree_delete(struct CBTree *tree, const void *key, unsigned klen) { void *obj, *tmp; unsigned bit = 0; /* location of current node/obj pointer under examination */ struct Node **pos = &tree->root; /* if 'pos' has user obj, prev_pos has internal node pointing to it */ struct Node **prev_pos = NULL; if (!tree->root) return false; /* match bits we know about */ while (is_node(*pos)) { bit = get_bit((*pos)->bitpos, key, klen); prev_pos = pos; pos = &(*pos)->child[bit]; } /* does the key actually matches */ obj = get_external(*pos); if (!key_matches(tree, obj, key, klen)) return false; if (tree->obj_free_cb) tree->obj_free_cb(tree->cb_ctx, obj); /* drop the internal node pointing to our key */ if (prev_pos) { tmp = *prev_pos; *prev_pos = (*prev_pos)->child[bit ^ 1]; cx_free(tree->cx, tmp); } else { tree->root = NULL; } return true; } /* * Management. 
*/ struct CBTree *cbtree_create(cbtree_getkey_func obj_key_cb, cbtree_walker_func obj_free_cb, void *cb_ctx, CxMem *cx) { struct CBTree *tree = cx_alloc(cx, sizeof(*tree)); if (!tree) return NULL; tree->root = NULL; tree->cb_ctx = cb_ctx; tree->obj_key_cb = obj_key_cb; tree->obj_free_cb = obj_free_cb; tree->cx = cx; return tree; } /* recursive freeing */ static void destroy_node(struct CBTree *tree, struct Node *node) { if (is_node(node)) { destroy_node(tree, node->child[0]); destroy_node(tree, node->child[1]); cx_free(tree->cx, node); } else if (tree->obj_free_cb) { void *obj = get_external(node); tree->obj_free_cb(tree->cb_ctx, obj); } } /* Free tree and all it's internal nodes. */ void cbtree_destroy(struct CBTree *tree) { if (tree->root) destroy_node(tree, tree->root); tree->root = NULL; cx_free(tree->cx, tree); } /* * walk over tree */ static bool walk(struct Node *node, cbtree_walker_func cb_func, void *cb_arg) { if (!is_node(node)) return cb_func(cb_arg, get_external(node)); return walk(node->child[0], cb_func, cb_arg) && walk(node->child[1], cb_func, cb_arg); } bool cbtree_walk(struct CBTree *tree, cbtree_walker_func cb_func, void *cb_arg) { if (!tree->root) return true; return walk(tree->root, cb_func, cb_arg); } skytools-3.2.6/lib/usual/base_win32.h0000644000000000000000000000616212166266754014327 0ustar /* * Random win32 compat. * * Copyright (c) 2007-2009 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef _USUAL_BASE_WIN32_H_ #define _USUAL_BASE_WIN32_H_ #include #include #include #ifndef ECONNABORTED #define ECONNABORTED WSAECONNABORTED #endif #ifndef EMSGSIZE #define EMSGSIZE WSAEMSGSIZE #endif #ifndef EINPROGRESS #define EINPROGRESS WSAEWOULDBLOCK /* WSAEINPROGRESS */ #endif #undef EAGAIN #define EAGAIN WSAEWOULDBLOCK /* WSAEAGAIN */ #ifndef EAFNOSUPPORT #define EAFNOSUPPORT ENOSYS #endif /* dummy types / functions */ #define hstrerror strerror #define getuid() (6667) #define setsid() getpid() #define setgid(x) (-1) #define setuid(x) (-1) #define fork() (-1) #define geteuid() getuid() #define setgroups(s, p) (-1) #define chown(f, u, g) (-1) #define srandom(s) srand(s) #define random() rand() #ifdef _MSC_VER #define snprintf(fmt, ...) 
_snprintf(fmt, __VA_ARGS__) static inline int strcasecmp(const char *a, const char *b) { return _stricmp(a, b); } static inline int strncasecmp(const char *a, const char *b, size_t cnt) { return _strnicmp(a, b, cnt); } typedef int ssize_t; #endif /* getrlimit() */ #define RLIMIT_NOFILE -1 struct rlimit { int rlim_cur; int rlim_max; }; static inline int getrlimit(int res, struct rlimit *dst) { dst->rlim_cur = dst->rlim_max = -1; return 0; } /* dummy getpwnam() */ struct passwd { char *pw_name; char *pw_passwd; uid_t pw_uid; pid_t pw_gid; char *pw_gecos; char *pw_dir; char *pw_shell; }; static inline struct passwd *getpwnam(const char *u) { return NULL; } static inline struct passwd *getpwuid(uid_t uid) { return NULL; } /* dummy getgrnam() */ struct group { char *gr_name; char *gr_passwd; gid_t gr_gid; char **gr_mem; }; static inline struct group *getgrnam(const char *g) { return NULL; } static inline struct group *getgrgid(gid_t gid) { return NULL; } /* format specifiers that should be in */ #ifndef HAVE_INTTYPES_H #define PRId8 "d" #define PRId16 "d" #define PRId32 "d" #define PRId64 "I64d" #define PRIi8 "d" #define PRIi16 "d" #define PRIi32 "d" #define PRIi64 "I64d" #define PRIo8 "o" #define PRIo16 "o" #define PRIo32 "o" #define PRIo64 "I64o" #define PRIu8 "u" #define PRIu16 "u" #define PRIu32 "u" #define PRIu64 "I64u" #define PRIx8 "x" #define PRIx16 "x" #define PRIx32 "x" #define PRIx64 "I64x" #define PRIX8 "X" #define PRIX16 "X" #define PRIX32 "X" #define PRIX64 "I64X" #endif #endif skytools-3.2.6/lib/usual/cxextra.c0000644000000000000000000001300312166266754014034 0ustar /* * Extra allocators */ #include #include #include /* * Tools for allocators. 
*/ static inline void *p_move(const void *p, int ofs) { return (char *)p + ofs; } /* * sample exit-on-failure wrapper */ static void *nofail_alloc(void *next, size_t len) { void *p = cx_alloc(next, len); if (!p) exit(1); return p; } static void *nofail_realloc(void *next, void *ptr, size_t len) { void *p = cx_realloc(next, ptr, len); if (!p) exit(1); return p; } static void nofail_free(void *next, const void *ptr) { cx_free(next, ptr); } static void nofail_destroy(void *next) { cx_destroy(next); } const struct CxOps cx_nofail_ops = { nofail_alloc, nofail_realloc, nofail_free, nofail_destroy, }; const struct CxMem cx_libc_nofail = { &cx_nofail_ops, (void*)&cx_libc_allocator, }; /* * Append-only pool. */ struct CxPoolSeg { struct CxPoolSeg *prev; unsigned size; unsigned used; }; struct CxPool { struct CxMem this; const struct CxMem *parent; struct CxPoolSeg *last; void *last_ptr; }; #define POOL_HDR ALIGN(sizeof(struct CxPoolSeg)) static void *pool_alloc(void *ctx, size_t size) { struct CxPool *pool = ctx; struct CxPoolSeg *seg = pool->last; void *ptr; unsigned nsize; size = ALIGN(size); if (seg && seg->used + size <= seg->size) { ptr = p_move(seg, POOL_HDR + seg->used); seg->used += size; pool->last_ptr = ptr; return ptr; } else { nsize = seg ? 
(2 * seg->size) : 512; while (nsize < size) nsize *= 2; seg = cx_alloc(pool->parent, POOL_HDR + nsize); if (seg == NULL) return NULL; seg->used = size; seg->size = nsize; seg->prev = pool->last; pool->last = seg; ptr = p_move(seg, POOL_HDR); pool->last_ptr = ptr; return ptr; } } /* free only last item */ static void pool_free(void *ctx, const void *ptr) { struct CxPool *pool = ctx; struct CxPoolSeg *cur = pool->last; const char *cstart; if (pool->last_ptr != ptr) return; cstart = p_move(cur, POOL_HDR); cur->used = (char *)ptr - cstart; pool->last_ptr = NULL; } /* realloc only last item */ static void *pool_realloc(void *ctx, void *ptr, size_t len) { struct CxPool *pool = ctx; struct CxPoolSeg *seg = pool->last; char *cstart, *cused, *p = ptr; size_t olen; if (pool->last_ptr != ptr) return NULL; cstart = p_move(seg, POOL_HDR); cused = cstart + seg->used; olen = cused - p; if (seg->used - olen + len <= seg->size) { seg->used = p + len - cstart; return p; } else { p = pool_alloc(ctx, len); if (!p) return NULL; memcpy(p, ptr, olen); return p; } } static void pool_destroy(void *ctx) { struct CxPool *pool = ctx; struct CxPoolSeg *cur, *tmp; if (!pool) return; for (cur = pool->last; cur; ) { tmp = cur->prev; cx_free(pool->parent, cur); cur = tmp; } cx_free(pool->parent, pool); } static const struct CxOps pool_ops = { pool_alloc, pool_realloc, pool_free, pool_destroy, }; /* * public functions */ CxMem *cx_new_pool(CxMem *parent) { struct CxPool *head; head = cx_alloc(parent, sizeof(*head)); if (!head) return NULL; head->parent = parent; head->this.ops = &pool_ops; head->this.ctx = head; head->last = NULL; return &head->this; } /* * tree alloc */ #define TREE_HDR (int)(sizeof(struct CxTreeItem)) struct CxTree { struct CxMem this; CxMem *real; struct List alloc_list; struct List subtree_node; struct List subtree_list; }; /* header for each allocation */ struct CxTreeItem { struct List node; }; static void *tree_alloc(void *ctx, size_t len) { struct CxTree *tree = ctx; struct 
CxTreeItem *item; item = cx_alloc(tree->real, TREE_HDR + len); if (!item) return NULL; list_init(&item->node); list_append(&tree->alloc_list, &item->node); return p_move(item, TREE_HDR); } static void *tree_realloc(void *ctx, void *ptr, size_t len) { struct CxTree *t = ctx; struct CxTreeItem *item, *item2; item = p_move(ptr, -TREE_HDR); list_del(&item->node); item2 = cx_realloc(t->real, item, TREE_HDR + len); if (item2) { list_append(&t->alloc_list, &item2->node); return p_move(item2, TREE_HDR); } else { list_append(&t->alloc_list, &item->node); return NULL; } } static void tree_free(void *ctx, const void *ptr) { struct CxTree *t = ctx; struct CxTreeItem *item; item = p_move(ptr, -TREE_HDR); list_del(&item->node); cx_free(t->real, item); } static void tree_destroy(void *ctx) { struct CxTree *tree = ctx, *sub; struct CxTreeItem *item; struct List *el, *tmp; /* unregister from parent */ list_del(&tree->subtree_node); /* free elements */ list_for_each_safe(el, &tree->alloc_list, tmp) { list_del(el); item = container_of(el, struct CxTreeItem, node); cx_free(tree->real, item); } /* free subtrees */ list_for_each_safe(el, &tree->subtree_list, tmp) { sub = container_of(el, struct CxTree, subtree_node); tree_destroy(sub); } /* free base struct */ cx_free(tree->real, tree); } static const struct CxOps tree_ops = { tree_alloc, tree_realloc, tree_free, tree_destroy, }; CxMem *cx_new_tree(CxMem *cx) { struct CxTree *t, *parent = NULL; CxMem *real = cx; /* * Try to allocate from real allocator. Otherwise allocations * will have double headers. 
*/ if (cx->ops == &tree_ops) { parent = cx->ctx; real = parent->real; } /* initialize */ t = cx_alloc(real, sizeof(*t)); if (!t) return NULL; t->real = real; t->this.ops = &tree_ops; t->this.ctx = t; list_init(&t->alloc_list); list_init(&t->subtree_node); list_init(&t->subtree_list); /* register at parent */ if (parent) list_append(&parent->subtree_list, &t->subtree_node); return &t->this; } skytools-3.2.6/lib/usual/event.c0000644000000000000000000004074312166266754013512 0ustar /* * event.c - libevent compatible event loop. * * Copyright (c) 2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* * Small poll()-based async event loop, API-compatible with libevent. * * For sitations where full libevent is not necessary. 
*/ #include #ifndef HAVE_LIBEVENT #include #include #include #include #include #ifndef MSG_NOSIGNAL #define MSG_NOSIGNAL 0 #endif /* max number of signals we care about */ #define MAX_SIGNAL 32 /* if tv_sec is larger, it's absolute timeout */ #define MAX_REL_TIMEOUT (30*24*60*60) /* if no nearby timeouts, max time to sleep (usecs) */ #define MAX_SLEEP (5*USEC) /* extra event flag to track if event is added */ #define EV_ACTIVE 0x80 struct event_base { /* pending timeouts */ struct Heap *timeout_heap; /* fd events */ struct StatList fd_list; /* pollfd <-> event mapping */ struct event **pfd_event; struct pollfd *pfd_list; int pfd_size; /* signal handling */ struct List sig_node; unsigned int sig_seen[MAX_SIGNAL]; struct List sig_waiters[MAX_SIGNAL]; int sig_send, sig_recv; struct event sig_ev; /* exit loop ASAP */ bool loop_break; /* finish current loop and exit */ bool loop_exit; /* cache if refreshed after each poll() */ usec_t cached_time; }; /* default event base */ static struct event_base *current_base; /* global signal data */ static volatile unsigned int sig_count[MAX_SIGNAL]; static bool signal_set_up[MAX_SIGNAL]; static struct sigaction old_handler[MAX_SIGNAL]; static LIST(sig_base_list); /* internal signal functions */ static bool sig_init(struct event_base *base, int sig); static void sig_close(struct event_base *base); /* * Debugging. */ #ifdef CASSERT #include #include #include #include static void base_dbg(struct event_base *base, const char *s, ...) { va_list ap; char buf[1024]; va_start(ap, s); vsnprintf(buf, sizeof(buf), s, ap); va_end(ap); log_noise("event base=%p: fdlist=%u timeouts=%d pfds=%d: %s", base, statlist_count(&base->fd_list), heap_size(base->timeout_heap), base->pfd_size, buf); } static void ev_dbg(struct event *ev, const char *s, ...) { va_list ap; char buf[1024], tval[128]; const char *typ = (ev->flags & EV_SIGNAL) ? 
"sig" : "fd"; va_start(ap, s); vsnprintf(buf, sizeof(buf), s, ap); va_end(ap); log_noise("event %s %d (flags=%s%s%s%s%s) [%s]: %s", typ, ev->fd, (ev->flags & EV_ACTIVE) ? "A" : "", (ev->flags & EV_PERSIST) ? "P" : "", (ev->flags & EV_TIMEOUT) ? "T" : "", (ev->flags & EV_READ) ? "R" : "", (ev->flags & EV_WRITE) ? "W" : "", (ev->flags & EV_TIMEOUT) ? format_time_ms(ev->timeout_val, tval, sizeof(tval)) : "-", buf); } #else #define base_dbg(b, ...) #define ev_dbg(b, ...) #endif /* * Helper functions. */ /* per-base time cache */ static usec_t get_base_time(struct event_base *base) { if (!base->cached_time) base->cached_time = get_time_usec(); return base->cached_time; } /* reset cached time */ static void reset_base_time(struct event_base *base) { base->cached_time = 0; } /* convert user tv to absolute tv */ static usec_t convert_timeout(struct event_base *base, struct timeval *tv) { usec_t val = tv->tv_sec * USEC + tv->tv_usec; if (tv->tv_sec < MAX_REL_TIMEOUT) val += get_base_time(base); return val; } static bool ev_is_better(const void *a, const void *b) { const struct event *ev1 = a, *ev2 = b; return ev1->timeout_val < ev2->timeout_val; } static void ev_save_pos(void *obj, unsigned pos) { struct event *ev = obj; ev->timeout_idx = pos; } /* enlarge pollfd array if needed */ static bool make_room(struct event_base *base, int need) { int total; void *tmp1; void *tmp2; if (need < base->pfd_size) return true; total = base->pfd_size * 2; if (total < 8) total = 8; while (total < need) total *= 2; tmp1 = realloc(base->pfd_list, total * sizeof(struct pollfd)); if (!tmp1) return false; base->pfd_list = tmp1; tmp2 = realloc(base->pfd_event, total * sizeof(struct event *)); if (!tmp2) return false; base->pfd_event = tmp2; base->pfd_size = total; return true; } /* * Single base functions. 
*/ struct event_base *event_init(void) { struct event_base *base = event_base_new(); if (!current_base) current_base = base; return base; } int event_loop(int loop_flags) { return event_base_loop(current_base, loop_flags); } int event_loopbreak(void) { return event_base_loopbreak(current_base); } void event_set(struct event *ev, int fd, short flags, uevent_cb_f cb, void *arg) { event_assign(ev, current_base, fd, flags, cb, arg); } int event_once(int fd, short flags, uevent_cb_f cb_func, void *cb_arg, struct timeval *timeout) { return event_base_once(current_base, fd, flags, cb_func, cb_arg, timeout); } int event_loopexit(struct timeval *timeout) { return event_base_loopexit(current_base, timeout); } /* * Event base initialization. */ struct event_base *event_base_new(void) { struct event_base *base; int i; base = calloc(1, sizeof(*base)); if (!base) return NULL; /* initialize timeout and fd areas */ base->timeout_heap = heap_create(ev_is_better, ev_save_pos, USUAL_ALLOC); if (!base->timeout_heap) { free(base); return NULL; } statlist_init(&base->fd_list, "fd_list"); /* initialize signal areas */ for (i = 0; i < MAX_SIGNAL; i++) list_init(&base->sig_waiters[i]); list_init(&base->sig_node); base->sig_send = base->sig_recv = -1; /* allocate pollfds */ if (!make_room(base, 8)) { event_base_free(base); return NULL; } return base; } void event_base_free(struct event_base *base) { if (!base) { if (!current_base) return; base = current_base; } if (base == current_base) current_base = NULL; heap_destroy(base->timeout_heap); free(base->pfd_event); free(base->pfd_list); sig_close(base); free(base); } /* set flag to exit loop ASAP */ int event_base_loopbreak(struct event_base *base) { base->loop_break = true; return 0; } /* * Multi-base functions. 
*/ /* fill event structure */ void event_assign(struct event *ev, struct event_base *base, int fd, short flags, uevent_cb_f cb, void *arg) { Assert(base); Assert((ev->flags & EV_ACTIVE) == 0); if (base == NULL) base = current_base; ev->fd = fd; ev->base = base; ev->flags = flags; ev->cb_func = cb; ev->cb_arg = arg; ev->ev_idx = -1; list_init(&ev->node); ev_dbg(ev, "event_set"); } /* Change base for a event */ int event_base_set(struct event_base *base, struct event *ev) { if (ev->flags & EV_ACTIVE) { errno = EINVAL; return -1; } ev->base = base; return 0; } /* Check if activated */ int is_event_active(struct event *ev) { return (ev->flags & EV_ACTIVE) ? 1 : 0; } /* de-activate event */ int event_del(struct event *ev) { struct event_base *base = ev->base; /* allow repeated deletions */ if ((ev->flags & EV_ACTIVE) == 0) { ev_dbg(ev, "event_del for inactive event??"); return 0; } ev_dbg(ev, "event_del"); /* remove from fd/signal list */ if (ev->flags & EV_SIGNAL) list_del(&ev->node); else if (ev->flags & (EV_READ | EV_WRITE)) statlist_remove(&base->fd_list, &ev->node); /* remove from timeout tree */ if (ev->flags & EV_TIMEOUT) { heap_remove(ev->base->timeout_heap, ev->timeout_idx); ev->flags &= ~EV_TIMEOUT; } /* clear reference to pollfd area */ if (ev->ev_idx >= 0) { ev->base->pfd_event[ev->ev_idx] = NULL; ev->ev_idx = -1; } /* tag inactive */ ev->flags &= ~EV_ACTIVE; return 0; } /* activate event */ int event_add(struct event *ev, struct timeval *timeout) { struct event_base *base = ev->base; Assert((ev->flags & EV_ACTIVE) == 0); Assert(base); /* sanity check, but dont do anything yet */ if (timeout) { if (ev->flags & EV_PERSIST) goto err_inval; if (!heap_reserve(base->timeout_heap, 1)) return -1; } else { if (ev->flags & EV_TIMEOUT) ev->flags &= ~EV_TIMEOUT; if (!(ev->flags & (EV_SIGNAL | EV_READ | EV_WRITE))) goto err_inval; } /* setup signal/fd */ if (ev->flags & EV_SIGNAL) { if (ev->flags & (EV_READ|EV_WRITE)) goto err_inval; if (!sig_init(base, ev->fd)) return 
-1; list_append(&base->sig_waiters[ev->fd], &ev->node); } else if (ev->flags & (EV_READ|EV_WRITE)) { statlist_append(&base->fd_list, &ev->node); } /* now act on timeout */ if (timeout) { ev->timeout_val = convert_timeout(base, timeout); ev->flags |= EV_TIMEOUT; heap_push(base->timeout_heap, ev); } ev->ev_idx = -1; ev->flags |= EV_ACTIVE; ev_dbg(ev, "event_add"); return 0; err_inval: errno = EINVAL; return -1; } /* * Event loop functions. */ static void deliver_event(struct event *ev, short flags) { ev_dbg(ev, "deliver_event: %d", flags); /* remove non-persitant event before calling user func */ if ((ev->flags & EV_PERSIST) == 0) event_del(ev); /* now call user func */ ev->cb_func(ev->fd, flags, ev->cb_arg); } static inline struct event *get_smallest_timeout(struct event_base *base) { return heap_top(base->timeout_heap); } /* decide how long poll() should sleep */ static int calc_timeout_ms(struct event_base *base) { struct event *ev; usec_t now; usec_t res; ev = get_smallest_timeout(base); if (!ev) return MAX_SLEEP / 1000; now = get_base_time(base); if (now + MAX_SLEEP < ev->timeout_val) res = MAX_SLEEP; else if (ev->timeout_val < now) res = 0; else res = ev->timeout_val - now; /* round up */ return (res + 999) / 1000; } /* deliver fd events */ static void process_fds(struct event_base *base, int pf_cnt) { int i; for (i = 0; i < pf_cnt; i++) { struct pollfd *pf = &base->pfd_list[i]; struct event *ev = base->pfd_event[i]; if (!ev) continue; base->pfd_event[i] = NULL; ev->ev_idx = -1; if (pf->revents & (POLLIN | POLLOUT | POLLERR | POLLHUP)) { int flags = ev->flags & (EV_READ | EV_WRITE); deliver_event(ev, flags); } if (base->loop_break) break; } } /* handle passed timeouts */ static void process_timeouts(struct event_base *base) { usec_t now; struct event *ev; ev = get_smallest_timeout(base); if (!ev) return; now = get_base_time(base); while (ev) { if (now < ev->timeout_val) break; deliver_event(ev, EV_TIMEOUT); if (base->loop_break) break; ev = 
get_smallest_timeout(base); } } /* main event loop */ int event_base_loop(struct event_base *base, int loop_flags) { int pf_cnt, res, timeout_ms; struct List *node; /* don't loop if non-block was requested */ if (loop_flags & EVLOOP_NONBLOCK) loop_flags |= EVLOOP_ONCE; base->loop_break = false; base->loop_exit = false; loop: if (!make_room(base, statlist_count(&base->fd_list))) return -1; /* fill pollfds */ pf_cnt = 0; statlist_for_each(node, &base->fd_list) { struct event *ev = container_of(node, struct event, node); struct pollfd *pf; ev->ev_idx = pf_cnt++; base->pfd_event[ev->ev_idx] = ev; pf = &base->pfd_list[ev->ev_idx]; pf->events = 0; pf->revents = 0; pf->fd = ev->fd; if (ev->flags & EV_READ) pf->events |= POLLIN; if (ev->flags & EV_WRITE) pf->events |= POLLOUT; } /* decide sleep time */ if (loop_flags & EVLOOP_NONBLOCK) timeout_ms = 0; else timeout_ms = calc_timeout_ms(base); /* forget cached time */ reset_base_time(base); /* poll for events */ res = poll(base->pfd_list, pf_cnt, timeout_ms); base_dbg(base, "poll(%d, timeout=%d) = res=%d errno=%d", pf_cnt, timeout_ms, res, res < 0 ? errno : 0); if (res == -1 && errno != EINTR) return -1; /* process events */ if (res > 0) { process_fds(base, pf_cnt); if (base->loop_break) return 0; } process_timeouts(base); /* decide whether to continue looping */ if (loop_flags & EVLOOP_ONCE) return 0; if (base->loop_break || base->loop_exit) return 0; goto loop; } /* * Signal handling. 
*/ /* global signal handler registered via sigaction() */ static void uevent_sig_handler(int sig) { struct List *node, *tmp; struct event_base *base; uint8_t byte = sig; int res; if (sig < 0 || sig >= MAX_SIGNAL) return; sig_count[sig]++; list_for_each_safe(node, &sig_base_list, tmp) { base = container_of(node, struct event_base, sig_node); if (base->sig_send >= 0) { loop: res = send(base->sig_send, &byte, 1, MSG_NOSIGNAL); if (res == -1 && (errno == EINTR)) goto loop; } } } /* close signal resources on one base */ static void sig_close(struct event_base *base) { list_del(&base->sig_node); if (base->sig_send >= 0) close(base->sig_send); if (base->sig_recv >= 0) close(base->sig_recv); base->sig_recv = base->sig_send = -1; } /* call all handlers waiting for specific signal */ static void deliver_signal(struct event_base *base, int sig) { struct List *node, *tmp; list_for_each_safe(node, &base->sig_waiters[sig], tmp) { struct event *ev = container_of(node, struct event, node); deliver_event(ev, EV_SIGNAL); } } /* reader from sig socket, calls actual signal handlers */ static void sig_reader(int fd, short flags, void *arg) { struct event_base *base = arg; uint8_t buf[128]; int res, sig; /* drain the socket */ loop: res = recv(fd, buf, sizeof(buf), 0); if (res < 0) { if (errno == EINTR) goto loop; } else if ((res == sizeof(buf)) && (res > 1)) goto loop; /* now check for new signals */ for (sig = 0; sig < MAX_SIGNAL; sig++) { unsigned glob, local; if (list_empty(&base->sig_waiters[sig])) continue; glob = sig_count[sig]; local = base->sig_seen[sig]; if (glob != local) { base->sig_seen[sig] = glob; deliver_signal(base, sig); } } } /* setup signal handling for particular signal */ static bool sig_init(struct event_base *base, int sig) { int spair[2]; if (sig < 0 || sig >= MAX_SIGNAL) { errno = EINVAL; return false; } /* global handler setup */ if (!signal_set_up[sig]) { struct sigaction sa; memset(&sa, 0, sizeof(sa)); sa.sa_handler = uevent_sig_handler; sa.sa_flags = 
SA_RESTART; sigfillset(&sa.sa_mask); if (sigaction(sig, &sa, &old_handler[sig]) != 0) return false; } /* local handler for base */ if (list_empty(&base->sig_node)) { if (socketpair(AF_UNIX, SOCK_STREAM, 0, spair) != 0) return false; if (!socket_setup(spair[0], true)) goto failed; if (!socket_setup(spair[1], true)) goto failed; event_assign(&base->sig_ev, base, spair[1], EV_READ|EV_PERSIST, sig_reader, base); if (event_add(&base->sig_ev, NULL) != 0) goto failed; base->sig_send = spair[0]; base->sig_recv = spair[1]; list_append(&sig_base_list, &base->sig_node); } /* if first waiter, then ignore previous signals */ if (list_empty(&base->sig_waiters[sig])) base->sig_seen[sig] = sig_count[sig]; return true; failed: close(spair[0]); close(spair[1]); return false; } /* * One-time events. */ struct once_event { struct event ev; uevent_cb_f cb_func; void *cb_arg; }; static void once_handler(int fd, short flags, void *arg) { struct once_event *once = arg; uevent_cb_f cb_func = once->cb_func; void *cb_arg = once->cb_arg; free(once); cb_func(fd, flags, cb_arg); } /* wait for one-time event, provide event struct internally */ int event_base_once(struct event_base *base, int fd, short flags, uevent_cb_f cb_func, void *cb_arg, struct timeval *timeout) { struct once_event *once; if (flags & EV_PERSIST) { errno = EINVAL; return -1; } once = calloc(1, sizeof(*once)); if (!once) return -1; event_assign(&once->ev, base, fd, flags, once_handler, once); if (event_add(&once->ev, timeout) != 0) { free(once); return -1; } return 0; } /* * Stop loop at particular time. 
*/ static void loopexit_handler(int fd, short flags, void *arg) { struct event_base *base = arg; base->loop_exit = true; } int event_base_loopexit(struct event_base *base, struct timeval *timeout) { if (!timeout) { errno = EINVAL; return -1; } return event_base_once(base, -1, 0, loopexit_handler, base, timeout); } /* * Info */ const char *event_get_version(void) { return "usual/event"; } const char *event_get_method(void) { return "poll"; } #endif /* !HAVE_LIBEVENT */ skytools-3.2.6/lib/usual/cxalloc.c0000644000000000000000000000431212166266754014006 0ustar /* * libusual - Utility library for C * * Copyright (c) 2007-2009 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include /* * Utility routines for cx_* API. 
*/ void *cx_alloc(CxMem *cx, size_t len) { if (!len) return NULL; return cx->ops->c_alloc(cx->ctx, len); } void *cx_realloc(CxMem *cx, void *ptr, size_t len) { if (!ptr) return cx_alloc(cx, len); if (!len) { cx_free(cx, ptr); return NULL; } return cx->ops->c_realloc(cx->ctx, ptr, len); } void cx_free(CxMem *cx, const void *ptr) { if (ptr) cx->ops->c_free(cx->ctx, ptr); } void cx_destroy(CxMem *cx) { if (!cx) return; if (!cx->ops->c_destroy) abort(); cx->ops->c_destroy(cx->ctx); } void *cx_alloc0(CxMem *cx, size_t len) { void *p = cx_alloc(cx, len); if (p) memset(p, 0, len); return p; } void *cx_memdup(CxMem *cx, const void *src, size_t len) { void *p = cx_alloc(cx, len); if (p) memcpy(p, src, len); return p; } void *cx_strdup(CxMem *cx, const char *s) { return cx_memdup(cx, s, strlen(s) + 1); } /* * Base allocator that uses libc routines. */ static void *libc_alloc(void *ctx, size_t len) { return malloc(len); } static void *libc_realloc(void *ctx, void *ptr, size_t len) { return realloc(ptr, len); } static void libc_free(void *ctx, const void *ptr) { free(ptr); } static const struct CxOps libc_alloc_ops = { libc_alloc, libc_realloc, libc_free, }; const struct CxMem cx_libc_allocator = { &libc_alloc_ops, NULL, }; skytools-3.2.6/lib/usual/socket.c0000644000000000000000000002475412166266754013665 0ustar /* * Socket helpers and compat. * * Copyright (c) 2007-2009 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #ifdef HAVE_UCRED_H #include #endif #ifdef HAVE_SYS_UCRED_H #include #endif /* toggle non-blocking flag */ bool socket_set_nonblocking(int fd, bool non_block) { int flags; /* get old flags */ flags = fcntl(fd, F_GETFL, 0); if (flags < 0) return false; /* flip O_NONBLOCK */ if (non_block) flags |= O_NONBLOCK; else flags &= ~O_NONBLOCK; /* set new flags */ if (fcntl(fd, F_SETFL, flags) < 0) return false; return true; } /* initial socket setup */ bool socket_setup(int sock, bool non_block) { int res; #ifdef SO_NOSIGPIPE /* disallow SIGPIPE, if possible */ int val = 1; res = setsockopt(sock, SOL_SOCKET, SO_NOSIGPIPE, &val, sizeof(val)); if (res < 0) return false; #endif /* close fd on exec */ res = fcntl(sock, F_SETFD, FD_CLOEXEC); if (res < 0) return false; /* when no data available, return EAGAIN instead blocking */ if (!socket_set_nonblocking(sock, non_block)) return false; return true; } bool socket_set_keepalive(int fd, int onoff, int keepidle, int keepintvl, int keepcnt) { int val, res; if (!onoff) { /* turn keepalive off */ val = 0; res = setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &val, sizeof(val)); return (res == 0); } /* turn keepalive on */ val = 1; res = setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &val, sizeof(val)); if (res < 0) return false; /* Darwin */ #ifdef TCP_KEEPALIVE if (keepidle) { val = keepidle; res = setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &val, sizeof(val)); if (res < 0 && errno != ENOPROTOOPT) return false; } #endif /* Linux, NetBSD */ #ifdef TCP_KEEPIDLE if (keepidle) { val = keepidle; res = setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &val, sizeof(val)); if (res < 0 && errno != ENOPROTOOPT) 
return false; } #endif #ifdef TCP_KEEPINTVL if (keepintvl) { val = keepintvl; res = setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &val, sizeof(val)); if (res < 0 && errno != ENOPROTOOPT) return false; } #endif #ifdef TCP_KEEPCNT if (keepcnt > 0) { val = keepcnt; res = setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &val, sizeof(val)); if (res < 0 && errno != ENOPROTOOPT) return false; } #endif /* Windows */ #ifdef SIO_KEEPALIVE_VALS if (keepidle || keepintvl) { struct tcp_keepalive vals; DWORD outlen = 0; if (!keepidle) keepidle = 5 * 60; if (!keepintvl) keepintvl = 15; vals.onoff = 1; vals.keepalivetime = keepidle * 1000; vals.keepaliveinterval = keepintvl * 1000; res = WSAIoctl(fd, SIO_KEEPALIVE_VALS, &vals, sizeof(vals), NULL, 0, &outlen, NULL, NULL); if (res != 0) return false; } #endif return true; } /* * Convert sockaddr to string. Supports ipv4, ipv6 and unix sockets. */ const char *sa2str(const struct sockaddr *sa, char *dst, int dstlen) { const struct sockaddr_in *in; const struct sockaddr_in6 *in6; const struct sockaddr_un *un; const char *tmp; char buf[128]; switch (sa->sa_family) { case AF_INET: in = (struct sockaddr_in *)sa; tmp = inet_ntop(AF_INET, &in->sin_addr, buf, sizeof(buf)); if (!tmp) return NULL; snprintf(dst, dstlen, "%s:%d", tmp, ntohs(in->sin_port)); break; case AF_INET6: in6 = (struct sockaddr_in6 *)sa; tmp = inet_ntop(AF_INET6, &in6->sin6_addr, buf, sizeof(buf)); if (!tmp) return NULL; snprintf(dst, dstlen, "%s/%d", tmp, ntohs(in6->sin6_port)); break; case AF_UNIX: un = (struct sockaddr_un *)sa; snprintf(dst, dstlen, "unix:%s", un->sun_path); break; default: snprintf(dst, dstlen, "sa2str(%d): unknown proto", sa->sa_family); break; } return dst; } #ifndef HAVE_GETPEEREID /* * Get other side's uid and git for UNIX socket. */ int getpeereid(int fd, uid_t *uid_p, gid_t *gid_p) { pid_t pid; return getpeercreds(fd, uid_p, gid_p, &pid); } #endif /* * Get uid, gid and pid of unix socket peer. * * Pid may not be availalbe on some OSes. * It's set to 0 then. 
*/ int getpeercreds(int fd, uid_t *uid_p, gid_t *gid_p, pid_t *pid_p) { /* What a mess */ #if defined(SO_PEERCRED) #ifdef HAVE_SYS_UCRED_H struct sockpeercred cred; /* openbsd */ #else struct ucred cred; /* linux */ #endif socklen_t len = sizeof(cred); if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &cred, &len) >= 0) { *uid_p = cred.uid; *gid_p = cred.gid; *pid_p = cred.pid; return 0; } return -1; #elif defined(HAVE_GETPEERUCRED) /* solaris */ ucred_t *cred = NULL; if (getpeerucred(fd, &cred) >= 0) { *uid_p = ucred_geteuid(cred); *gid_p = ucred_getegid(cred); *pid_p = ucred_getpid(cred); ucred_free(cred); if ((int)*uid_p == -1 || (int)*gid_p == -1) return -1; return 0; } return -1; #elif defined(LOCAL_PEEREID) /* netbsd */ struct unpcbid cred; socklen_t len = sizeof(cred); if (getsockopt(fd, 0, LOCAL_PEEREID, &cred, &len) < 0) return -1; *uid_p = cred.unp_euid; *gid_p = cred.unp_egid; *pid_p = cred.unp_pid; return 0; #elif defined(HAVE_GETPEEREID) /* generic bsd; no pid */ *pid_p = 0; return getpeereid(fd, uid_p, gid_p); #elif defined(LOCAL_PEERCRED) /* old freebsd, osx; no pid */ struct xucred cred; socklen_t len = sizeof(cred); if (getsockopt(fd, 0, LOCAL_PEERCRED, &cred, &len) < 0) return -1; if (cred.cr_version != XUCRED_VERSION) { errno = EIO; return -1; } *uid_p = cred.cr_uid; *gid_p = cred.cr_gid; *pid_p = 0; return 0; #else /* no implementation */ errno = ENOSYS; return -1; #endif } #ifndef HAVE_POLL /* * Emulate poll() with select() */ #ifdef HAVE_SYS_SELECT_H #include #endif /* * dynamic buffer for fd_set to avoid depending on FD_SETSIZE */ struct fd_buf { fd_set *set; int alloc_bytes; }; static void fdbuf_zero(struct fd_buf *buf) { if (buf->set) memset(buf->set, 0, buf->alloc_bytes); } static bool fdbuf_resize(struct fd_buf *buf, int fd) { int need_bytes; unsigned char *ptr; /* default allocation */ int alloc = sizeof(fd_set); #ifdef WIN32 int cnt = buf->set ? 
buf->set->fd_count : 0; /* win32 fd_set is array of handles, +8 for count&padding */ need_bytes = (cnt + 1) * sizeof(buf->set->fd_array[0]) + 8; #else /* otherwise, fd_set is bitmap, +8 for int/long alignment */ need_bytes = fd / 8 + 8; #endif if (buf->alloc_bytes < need_bytes) { while (alloc < need_bytes) alloc *= 2; if (!buf->set) ptr = malloc(alloc); else ptr = realloc(buf->set, alloc); if (!ptr) return false; /* clean new area */ memset(ptr + buf->alloc_bytes, 0, alloc - buf->alloc_bytes); buf->set = (fd_set *)ptr; buf->alloc_bytes = alloc; } return true; } /* win32: make macros ignore FD_SETSIZE */ #undef FD_SETSIZE #define FD_SETSIZE (1 << 30) int poll(struct pollfd *fds, nfds_t nfds, int timeout_ms) { static struct fd_buf readfds = { NULL, 0 }; static struct fd_buf writefds = { NULL, 0 }; struct pollfd *pf; int res, fd_max = 0; struct timeval *tv = NULL; struct timeval tvreal; unsigned i; /* convert timeout_ms to timeval */ if (timeout_ms >= 0) { tvreal.tv_sec = timeout_ms / 1000; tvreal.tv_usec = (timeout_ms % 1000) * 1000; tv = &tvreal; } else if (timeout_ms < -1) goto err_inval; /* * Convert pollfds to fd sets. */ fdbuf_zero(&readfds); fdbuf_zero(&writefds); for (i = 0; i < nfds; i++) { pf = fds + i; if (pf->fd < 0) goto err_badf; /* sets must be equal size */ if (!fdbuf_resize(&readfds, pf->fd)) goto err_nomem; if (!fdbuf_resize(&writefds, pf->fd)) goto err_nomem; if (pf->events & POLLIN) FD_SET((unsigned)pf->fd, readfds.set); if (pf->events & POLLOUT) FD_SET((unsigned)pf->fd, writefds.set); if (pf->fd > fd_max) fd_max = pf->fd; } res = select(fd_max + 1, readfds.set, writefds.set, NULL, tv); if (res <= 0) return res; /* * select() and poll() count fd-s differently, * need to recount them here. 
*/ res = 0; for (i = 0; i < nfds; i++) { pf = fds + i; pf->revents = 0; if ((pf->events & POLLIN) && FD_ISSET(pf->fd, readfds.set)) pf->revents |= POLLIN; if ((pf->events & POLLOUT) && FD_ISSET(pf->fd, writefds.set)) pf->revents |= POLLOUT; if (pf->revents) res += 1; } return res; err_nomem: errno = ENOMEM; return -1; err_badf: errno = EBADF; return -1; err_inval: errno = EINVAL; return -1; } #endif /* PLPROXY_POLL_COMPAT */ #ifdef WIN32 /* create local TCP socket, idea from libevent/Tor */ int win32_socketpair(int d, int typ, int proto, int sv[2]) { int list = -1, s1 = -1, s2 = -1; struct sockaddr_in sa1, sa2; socklen_t slen = sizeof(sa1); int res; if (d != AF_INET && d != AF_UNIX) goto err_inval; if (proto || !sv) goto err_inval; /* prepare sockaddr for bind */ memset(&sa1, 0, sizeof(sa1)); sa1.sin_family = AF_INET; sa1.sin_addr.s_addr = htonl(INADDR_LOOPBACK); sa1.sin_port = htons(0); /* create listen socket */ list = socket(AF_INET, typ, 0); if (list == -1) return -1; res = bind(list, (struct sockaddr *)&sa1, sizeof(sa1)); if (res == -1) goto failed; res = listen(list, 1); if (res == -1) goto failed; /* read listen port */ res = getsockname(list, (struct sockaddr *)&sa1, &slen); if (res == -1 || slen != sizeof(sa1)) goto failed; /* connect to it */ s1 = socket(AF_INET, typ, 0); if (s1 == -1) goto failed; res = connect(s1, (struct sockaddr *)&sa1, sizeof(sa1)); if (res == -1) goto failed; /* and accept from other end */ s2 = accept(list, (struct sockaddr *)&sa2, &slen); if (s2 == -1 || slen != sizeof(sa2)) goto failed; /* sanity check */ res = getsockname(s1, (struct sockaddr *)&sa1, &slen); if (res == -1 || slen != sizeof(sa1)) goto failed; if (sa1.sin_port != sa2.sin_port) goto failed; closesocket(list); sv[0] = s1; sv[1] = s2; return 0; failed: errno = (res == -1) ? 
WSAGetLastError() : EFAULT; if (list != -1) closesocket(list); if (s1 != -1) closesocket(s1); if (s2 != -1) closesocket(s2); return -1; err_inval: errno = EINVAL; return -1; } #endif skytools-3.2.6/lib/usual/netdb.h0000644000000000000000000000363012166266754013464 0ustar /* * libusual - Utility library for C * * Copyright (c) 2010 Marko Kreen, Skype Technologies * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** @file * * DNS lookup. */ #ifndef _USUAL_NETDB_H_ #define _USUAL_NETDB_H_ #include #ifdef HAVE_NETDB_H #include #endif #ifndef HAVE_GETADDRINFO_A /** Async execution */ #ifndef GAI_WAIT #define GAI_WAIT 0 #endif /** Synchronous execution */ #ifndef GAI_NOWAIT #define GAI_NOWAIT 1 #endif /* avoid name conflicts */ #define gaicb usual_gaicb #define getaddrinfo_a(a,b,c,d) usual_getaddrinfo_a(a,b,c,d) /** * Request data for getaddrinfo_a(). * * Fields correspond to getaddrinfo() parameters. */ struct gaicb { /** node name */ const char *ar_name; /** service name */ const char *ar_service; /** hints */ const struct addrinfo *ar_request; /** result */ struct addrinfo *ar_result; /* internal state */ int _state; }; #ifndef EAI_INPROGRESS #define EAI_INPROGRESS -100 #endif #ifndef EAI_SYSTEM #define EAI_SYSTEM -10 #endif #define gai_error(gcb) ((gcb)->_state) /** * Compat: Async DNS lookup. 
*/ int getaddrinfo_a(int mode, struct gaicb *list[], int nitems, struct sigevent *sevp); #endif /* HAVE_GETADDRINFO_A */ #endif /* _USUAL_NETDB_H_ */ skytools-3.2.6/lib/usual/safeio.h0000644000000000000000000000324112166266754013634 0ustar /* * Copyright (c) 2007-2009 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** @file * * EINTR-safe I/O functions. */ #ifndef _USUAL_SAFEIO_H_ #define _USUAL_SAFEIO_H_ #include /** read */ int safe_read(int fd, void *buf, int len) _MUSTCHECK; /** write */ int safe_write(int fd, const void *buf, int len) _MUSTCHECK; /** recv */ int safe_recv(int fd, void *buf, int len, int flags) _MUSTCHECK; /** send */ int safe_send(int fd, const void *buf, int len, int flags) _MUSTCHECK; /** close */ int safe_close(int fd); /** recvmsg */ int safe_recvmsg(int fd, struct msghdr *msg, int flags) _MUSTCHECK; /** sendmsg */ int safe_sendmsg(int fd, const struct msghdr *msg, int flags) _MUSTCHECK; /** connect */ int safe_connect(int fd, const struct sockaddr *sa, socklen_t sa_len) _MUSTCHECK; /** accept */ int safe_accept(int fd, struct sockaddr *sa, socklen_t *sa_len) _MUSTCHECK; #endif skytools-3.2.6/lib/usual/base.c0000644000000000000000000000272212166266754013276 0ustar /* * Basic C environment. 
* * Copyright (c) 2007-2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #if defined(HAVE_MALLOC_H) && defined(__darwin__) #include #endif /* define posix_memalign() only when possible to emulate */ #if !defined(HAVE_POSIX_MEMALIGN) \ && (defined(HAVE_MEMALIGN) || defined(HAVE_VALLOC)) int posix_memalign(void **ptr_p, size_t align, size_t len) { void *p; int ret, old_errno = errno; #ifdef HAVE_MEMALIGN p = memalign(align, len); #else /* !HAVE_MEMALIGN */ #ifdef HAVE_VALLOC /* assuming less than pagesize alignment */ p = valloc(len); #endif /* !VALLOC */ #endif /* !MEMALIGN */ *ptr_p = p; if (p) return 0; /* on error restore old errno */ ret = errno; errno = old_errno; return ret; } #endif skytools-3.2.6/lib/usual/cfparser.c0000644000000000000000000002603012166266754014167 0ustar /* * Config file parser. * * Copyright (c) 2007-2009 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #ifdef HAVE_PWD_H #include #endif #include #include #include #include /* * INI file parser. */ static int count_lines(const char *s, const char *end) { int lineno = 1; for (; s < end; s++) { if (*s == '\n') lineno++; } return lineno; } bool parse_ini_file(const char *fn, cf_handler_f user_handler, void *arg) { char *buf; char *p, *key, *val; int klen, vlen; char o1, o2; bool ok; buf = load_file(fn, NULL); if (buf == NULL) return false; p = buf; while (*p) { /* space at the start of line - including empty lines */ while (*p && isspace(*p)) p++; /* skip comment lines */ if (*p == '#' || *p == ';') { while (*p && *p != '\n') p++; continue; } /* got new section */ if (*p == '[') { key = ++p; while (*p && *p != ']' && *p != '\n') p++; if (*p != ']') goto syntax_error; o1 = *p; *p = 0; log_debug("parse_ini_file: [%s]", key); ok = user_handler(arg, true, key, NULL); *p++ = o1; if (!ok) goto failed; continue; } /* done? 
*/ if (*p == 0) break; /* read key val */ key = p; while (*p && (isalnum(*p) || strchr("_.-*", *p))) p++; klen = p - key; /* expect '=', skip it */ while (*p && (*p == ' ' || *p == '\t')) p++; if (*p != '=') { goto syntax_error; } else p++; while (*p && (*p == ' ' || *p == '\t')) p++; /* now read value */ val = p; while (*p && (*p != '\n')) p++; vlen = p - val; /* eat space at end */ while (vlen > 0 && isspace(val[vlen - 1])) vlen--; /* skip junk */ while (*p && isspace(*p)) p++; /* our buf is r/w, so take it easy */ o1 = key[klen]; o2 = val[vlen]; key[klen] = 0; val[vlen] = 0; log_debug("parse_ini_file: '%s' = '%s'", key, val); ok = user_handler(arg, false, key, val); log_debug("parse_ini_file: '%s' = '%s' ok:%d", key, val, ok); /* restore data, to keep count_lines() working */ key[klen] = o1; val[vlen] = o2; if (!ok) goto failed; } free(buf); return true; syntax_error: log_error("syntax error in configuration (%s:%d), stopping loading", fn, count_lines(buf, p)); failed: free(buf); return false; } /* * Config framework. 
*/ static void *get_dest(void *base, const struct CfKey *k) { char *dst; if (k->flags & CF_VAL_REL) { /* relative address requires base */ if (!base) return NULL; dst = (char *)base + k->key_ofs; } else dst = (char *)k->key_ofs; return dst; } static const struct CfSect *find_sect(const struct CfContext *cf, const char *name) { const struct CfSect *s; for (s = cf->sect_list; s->sect_name; s++) { if (strcmp(s->sect_name, name) == 0) return s; if (strcmp(s->sect_name, "*") == 0) return s; } return NULL; } static const struct CfKey *find_key(const struct CfSect *s, const char *key) { const struct CfKey *k; for (k = s->key_list; k->key_name; k++) { if (strcmp(k->key_name, key) == 0) return k; } return k; } const char *cf_get(const struct CfContext *cf, const char *sect, const char *key, char *buf, int buflen) { const struct CfSect *s; const struct CfKey *k; void *base, *p; struct CfValue cv; /* find section */ s = find_sect(cf, sect); if (!s) return NULL; /* find section base */ base = cf->base; if (s->base_lookup) base = s->base_lookup(base, sect); /* handle dynamic keys */ if (s->set_key) { if (!s->get_key) return NULL; return s->get_key(base, key, buf, buflen); } /* get fixed key */ k = find_key(s, key); if (!k || !k->op.getter) return NULL; p = get_dest(base, k); if (!p) return NULL; cv.key_name = k->key_name; cv.extra = k->op.op_extra; cv.value_p = p; cv.buf = buf; cv.buflen = buflen; return k->op.getter(&cv); } bool cf_set(const struct CfContext *cf, const char *sect, const char *key, const char *val) { const struct CfSect *s; const struct CfKey *k; void *base, *p; struct CfValue cv; /* find section */ s = find_sect(cf, sect); if (!s) { log_error("Unknown section: %s", sect); return false; } /* find section base */ base = cf->base; if (s->base_lookup) base = s->base_lookup(base, sect); /* handle dynamic keys */ if (s->set_key) return s->set_key(base, key, val); /* set fixed key */ k = find_key(s, key); if (!k) { log_error("Unknown parameter: %s/%s", sect, key); 
return false; } if (!k->op.setter || (k->flags & CF_READONLY)) { /* silently ignore */ return true; } if ((k->flags & CF_NO_RELOAD) && cf->loaded) { /* silently ignore */ return true; } p = get_dest(base, k); if (!p) { log_error("Bug - no base for relative key: %s/%s", sect, key); return false; } cv.key_name = k->key_name; cv.extra = k->op.op_extra; cv.value_p = p; cv.buf = NULL; cv.buflen = 0; return k->op.setter(&cv, val); } /* * File loader */ struct LoaderCtx { const struct CfContext *cf; const char *cur_sect; void *top_base; bool got_main_sect; }; static bool fill_defaults(struct LoaderCtx *ctx) { const struct CfKey *k; const struct CfSect *s; s = find_sect(ctx->cf, ctx->cur_sect); if (!s) goto fail; if (s == ctx->cf->sect_list) ctx->got_main_sect = true; if (s->section_start) { if (!s->section_start(ctx->top_base, ctx->cur_sect)) return false; } if (s->set_key) return true; for (k = s->key_list; k->key_name; k++) { if (!k->def_value || (k->flags & CF_READONLY)) continue; if ((k->flags & CF_NO_RELOAD) && ctx->cf->loaded) continue; if (!cf_set(ctx->cf, ctx->cur_sect, k->key_name, k->def_value)) goto fail; } return true; fail: log_error("fill_defaults fail"); return false; } static bool load_handler(void *arg, bool is_sect, const char *key, const char *val) { struct LoaderCtx *ctx = arg; if (is_sect) { if (ctx->cur_sect) free(ctx->cur_sect); ctx->cur_sect = strdup(key); if (!ctx->cur_sect) return false; return fill_defaults(ctx); } else if (!ctx->cur_sect) { log_error("load_init_file: value without section: %s", key); return false; } else { return cf_set(ctx->cf, ctx->cur_sect, key, val); } } bool cf_load_file(const struct CfContext *cf, const char *fn) { struct LoaderCtx ctx; bool ok; memset(&ctx, 0, sizeof(ctx)); ctx.cf = cf; ok = parse_ini_file(fn, load_handler, &ctx); if (ctx.cur_sect) free(ctx.cur_sect); if (ok && !ctx.got_main_sect) { log_error("load_init_file: main section missing from config file"); return false; } return ok; } /* * Various value 
parsers. */ bool cf_set_int(struct CfValue *cv, const char *value) { int *ptr = cv->value_p; char *end; long val; errno = 0; val = strtol(value, &end, 0); if (end == value || *end != 0) { /* reject partial parse */ if (!errno) errno = EINVAL; return false; } *ptr = val; return true; } bool cf_set_uint(struct CfValue *cv, const char *value) { unsigned int *ptr = cv->value_p; char *end; unsigned long val; errno = 0; val = strtoul(value, &end, 0); if (end == value || *end != 0) { /* reject partial parse */ if (!errno) errno = EINVAL; return false; } *ptr = val; return true; } bool cf_set_str(struct CfValue *cv, const char *value) { char **dst_p = cv->value_p; char *tmp = strdup(value); if (!tmp) { log_error("cf_set_str: no mem"); return false; } if (*dst_p) free(*dst_p); *dst_p = tmp; return true; } bool cf_set_filename(struct CfValue *cv, const char *value) { char **dst_p = cv->value_p; char *tmp, *home, *p; int v_len, usr_len, home_len; struct passwd *pw; /* do we need to do tilde expansion */ if (value[0] != '~') return cf_set_str(cv, value); /* find username end */ v_len = strlen(value); if ((p = memchr(value, '/', v_len)) == NULL) usr_len = v_len - 1; else usr_len = (p - value) - 1; if (usr_len) { p = malloc(usr_len + 1); if (!p) return false; memcpy(p, value + 1, usr_len); p[usr_len] = 0; pw = getpwnam(p); free(p); if (!pw) goto fail; home = pw->pw_dir; } else { home = getenv("HOME"); if (!home) { pw = getpwuid(getuid()); if (!pw) goto fail; home = pw->pw_dir; } } if (!home) goto fail; home_len = strlen(home); tmp = malloc(v_len - usr_len + home_len); if (!tmp) return false; memcpy(tmp, home, home_len); memcpy(tmp + home_len, value + usr_len + 1, v_len - usr_len - 1); tmp[v_len - 1 - usr_len + home_len] = 0; log_debug("expanded '%s' -> '%s'", value, tmp); if (*dst_p) free(*dst_p); *dst_p = tmp; return true; fail: log_error("cannot to expand filename: %s", value); return false; } /* parse float with error checking. 
returns -1 if failed */ static double parse_time(const char *value) { double v; char *endp = NULL; errno = 0; v = strtod(value, &endp); if (errno) return -1; if (*endp || endp == value || v < 0) { errno = EINVAL; return -1; } return v; } bool cf_set_time_usec(struct CfValue *cv, const char *value) { usec_t *ptr = cv->value_p; double v = parse_time(value); if (v < 0) return false; *ptr = (usec_t)(USEC * v); return true; } bool cf_set_time_double(struct CfValue *cv, const char *value) { double *ptr = cv->value_p; double v = parse_time(value); if (v < 0) return false; *ptr = v; return true; } /* * Various value formatters. */ const char *cf_get_str(struct CfValue *cv) { char **p = cv->value_p; return *p; } const char *cf_get_int(struct CfValue *cv) { int *p = cv->value_p; snprintf(cv->buf, cv->buflen, "%d", *p); return cv->buf; } const char *cf_get_uint(struct CfValue *cv) { unsigned int *p = cv->value_p; snprintf(cv->buf, cv->buflen, "%u", *p); return cv->buf; } const char *cf_get_time_double(struct CfValue *cv) { double *p = cv->value_p; snprintf(cv->buf, cv->buflen, "%g", *p); return cv->buf; } const char *cf_get_time_usec(struct CfValue *cv) { struct CfValue tmp = *cv; usec_t *p = cv->value_p; double d = (double)(*p) / USEC; tmp.value_p = &d; return cf_get_time_double(&tmp); } /* * str->int mapping */ const char *cf_get_lookup(struct CfValue *cv) { int *p = cv->value_p; const struct CfLookup *lk = cv->extra; for (; lk->name; lk++) { if (lk->value == *p) return lk->name; } return "INVALID"; } bool cf_set_lookup(struct CfValue *cv, const char *value) { int *p = cv->value_p; const struct CfLookup *lk = cv->extra; for (; lk->name; lk++) { if (strcasecmp(lk->name, value) == 0) { *p = lk->value; return true; } } return false; } skytools-3.2.6/lib/usual/cfparser.h0000644000000000000000000001413012166266754014172 0ustar /* * Copyright (c) 2007-2009 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose 
with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** @file * Config file parser. */ #ifndef _USUAL_CFPARSER_H_ #define _USUAL_CFPARSER_H_ #include /** * @name Simple line-by-line parser * @{ */ /** Callback signarure for @ref parse_ini_file() */ typedef bool (*cf_handler_f)(void *arg, bool is_sect, const char *key, const char *val); /** * Simple parser, launches callback for each line */ bool parse_ini_file(const char *fn, cf_handler_f user_handler, void *arg) _MUSTCHECK; /* @} */ /** * @name Complex parser with variable setting. * @{ */ /** @name Per-key flags * @{ */ /** The pointer is absolute */ #define CF_VAL_ABS 0 /** The pointer is relative to base */ #define CF_VAL_REL (1<<1) /** Value must not be changed on reload */ #define CF_NO_RELOAD (1<<2) /** Value can only be read */ #define CF_READONLY (1<<3) /** @} */ /** * Helper structure for passing key info to CfOps */ struct CfValue { void *value_p; const void *extra; const char *key_name; char *buf; int buflen; }; /** * Callbacks for setting and getting a variable value. * * Getter requires temp buf, returns string pointer, which * may or may not point to temp buf. Getter is optional. 
*/ struct CfOps { bool (*setter)(struct CfValue *cv, const char *value); const char *(*getter)(struct CfValue *cv); const void *op_extra; }; /** * Parameter description */ struct CfKey { /** Parameter name */ const char *key_name; /** Type-specific functions, called with absolute pointer */ struct CfOps op; /** Flags: CF_VAL_ABS, CF_VAL_REL */ int flags; /** Absolute or relative offset */ uintptr_t key_ofs; /** Default value as string */ const char *def_value; }; /** * Section description */ struct CfSect { /** Section name */ const char *sect_name; /** Key list */ const struct CfKey *key_list; /** Get base pointer to dynamic sections (optional) */ void *(*base_lookup)(void *top_base, const char *sect_name); /** Set dynamic keys (optional) */ bool (*set_key)(void *base, const char *key, const char *val); /** Get dynamic keys (optional) */ const char *(*get_key)(void *base, const char *key, char *buf, int buflen); /** New section callback (optional) */ bool (*section_start)(void *top_base, const char *sect_name); }; /** * Top-level config information */ struct CfContext { /** Section list */ const struct CfSect *sect_list; /** Top-level base pointer, needed for relative addressing */ void *base; /** If set, then CF_NO_RELOAD keys cannot be changed anymore */ bool loaded; }; /** * @name Type-specific helpers * @{ */ /** Setter for string */ bool cf_set_str(struct CfValue *cv, const char *value); /** Setter for filename */ bool cf_set_filename(struct CfValue *cv, const char *value); /** Setter for int */ bool cf_set_int(struct CfValue *cv, const char *value); /** Setter for unsigned int */ bool cf_set_uint(struct CfValue *cv, const char *value); /** Setter for time-usec */ bool cf_set_time_usec(struct CfValue *cv, const char *value); /** Setter for time-double */ bool cf_set_time_double(struct CfValue *cv, const char *value); /** Setter for lookup */ bool cf_set_lookup(struct CfValue *cv, const char *value); /** Getter for string */ const char *cf_get_str(struct 
CfValue *cv); /** Getter for int */ const char *cf_get_int(struct CfValue *cv); /** Getter for unsigned int */ const char *cf_get_uint(struct CfValue *cv); /** Getter for time-usec */ const char *cf_get_time_usec(struct CfValue *cv); /** Getter for time-double */ const char *cf_get_time_double(struct CfValue *cv); /** Getter for int lookup */ const char *cf_get_lookup(struct CfValue *cv); /** @} */ /** * @name Shortcut CfOps for well-known types * @{ */ /** Ops for string */ #define CF_STR { cf_set_str, cf_get_str } /** Ops for filename */ #define CF_FILE { cf_set_filename, cf_get_str } /** Ops for integer */ #define CF_INT { cf_set_int, cf_get_int } /** Ops for unsigned integer */ #define CF_UINT { cf_set_uint, cf_get_uint } /** Ops for boolean */ #define CF_BOOL { cf_set_int, cf_get_int } /** Ops for time as usec */ #define CF_TIME_USEC { cf_set_time_usec, cf_get_time_usec } /** Ops for time as double */ #define CF_TIME_DOUBLE { cf_set_time_double, cf_get_time_double } /** Ops for lookup, takes table as argument */ #define CF_LOOKUP(t) { cf_set_lookup, cf_get_lookup, t } /** @} */ /** * Lookup entry for CF_LOOKUP table. */ struct CfLookup { const char *name; int value; }; /** * Helper to describe CfKey with absolute addressing */ #define CF_ABS(name, ops, var, flags, def) \ { name, ops, flags | CF_VAL_ABS, (uintptr_t)&(var), def } /** * Helper to describe CfKey with relative addressing. * * Before using it defined CF_REL_BASE to base struct. * * The var should be field in that struct. * * @code * struct Foo { * char *foo_name; * }; * #define CF_REL_BASE struct Foo * ... * CF_REL("name", CF_STR, foo_name, 0, NULL) * ... * #undef CF_REL_BASE * @endcode */ #define CF_REL(name, ops, var, flags, def) \ { name, ops, flags | CF_VAL_REL, offsetof(CF_REL_BASE, var), def } /** * Load config from file. */ bool cf_load_file(const struct CfContext *cf, const char *fn) _MUSTCHECK; /** * Get single value. 
*/ const char *cf_get(const struct CfContext *cf, const char *sect, const char *var, char *buf, int buflen); /** * Set single value. */ bool cf_set(const struct CfContext *cf, const char *sect, const char *var, const char *val); /* @} */ #endif skytools-3.2.6/lib/usual/logging.c0000644000000000000000000001457612166266754014024 0ustar /* * Logging for unix service. * * Copyright (c) 2007-2009 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include #include #include #include #include #include #include #ifdef HAVE_SYSLOG_H #include #endif #ifdef WIN32 #define LOG_EMERG 0 #define LOG_ALERT 1 #define LOG_CRIT 2 #define LOG_ERR 3 #define LOG_WARNING 4 #define LOG_NOTICE 5 #define LOG_INFO 6 #define LOG_DEBUG 7 #define LOG_PID 0 #define LOG_DAEMON 0 #define openlog(a,b,c) #define syslog win32_eventlog #define closelog() static void win32_eventlog(int level, const char *fmt, ...); #endif int cf_quiet = 0; int cf_verbose = 0; const char *cf_logfile = NULL; int cf_syslog = 0; const char *cf_syslog_ident = NULL; const char *cf_syslog_facility = NULL; enum LogLevel cf_syslog_level = LG_INFO; enum LogLevel cf_logfile_level = LG_NOISE; enum LogLevel cf_stderr_level = LG_NOISE; /* optional function to fill prefix */ logging_prefix_fn_t logging_prefix_cb; static FILE *log_file = NULL; static bool syslog_started = false; struct LevelInfo { const char *tag; int syslog_prio; }; static const struct LevelInfo log_level_list[] = { { "FATAL", LOG_CRIT }, /* LG_FATAL */ { "ERROR", LOG_ERR }, /* LG_ERROR */ { "WARNING", LOG_WARNING },/* LG_WARNING */ { "LOG", LOG_INFO }, /* LG_STATS*/ { "LOG", LOG_INFO }, /* LG_INFO */ { "DEBUG", LOG_DEBUG }, /* LG_DEBUG */ { "NOISE", LOG_DEBUG }, /* LG_NOISE */ }; struct FacName { const char *name; int code; }; static const struct FacName facility_names [] = { #ifndef WIN32 { "auth", LOG_AUTH }, #ifdef LOG_AUTHPRIV { "authpriv", LOG_AUTHPRIV }, #endif { "daemon", LOG_DAEMON }, { "user", LOG_USER }, { "local0", LOG_LOCAL0 }, { "local1", LOG_LOCAL1 }, { "local2", LOG_LOCAL2 }, { "local3", LOG_LOCAL3 }, { "local4", LOG_LOCAL4 }, { "local5", LOG_LOCAL5 }, { "local6", LOG_LOCAL6 }, { "local7", LOG_LOCAL7 }, #endif { NULL }, }; void reset_logging(void) { if (log_file) { fclose(log_file); log_file = NULL; } if (syslog_started) { closelog(); syslog_started = 0; } } static void start_syslog(void) { const struct FacName *f; int fac = LOG_DAEMON; const char *ident = cf_syslog_ident; if 
(!cf_syslog) return; if (cf_syslog_facility) { for (f = facility_names; f->name; f++) { if (strcmp(f->name, cf_syslog_facility) == 0) { fac = f->code; break; } } } if (!ident) { ident = getprogname(); if (!ident) ident = "unnamed"; } openlog(ident, LOG_PID, fac); syslog_started = 1; } void log_generic(enum LogLevel level, void *ctx, const char *fmt, ...) { char buf[2048], buf2[2048]; char ebuf[256]; char timebuf[64]; const struct LevelInfo *lev = &log_level_list[level]; unsigned pid = getpid(); va_list ap; int pfxlen = 0; int old_errno = errno; char *msg = buf; if (logging_prefix_cb) { pfxlen = logging_prefix_cb(level, ctx, buf, sizeof(buf)); if (pfxlen < 0) goto done; if (pfxlen >= (int)sizeof(buf)) pfxlen = sizeof(buf) - 1; } va_start(ap, fmt); vsnprintf(buf + pfxlen, sizeof(buf) - pfxlen, fmt, ap); va_end(ap); /* replace '\n' in message with '\n\t', strip trailing whitespace */ if (strchr(msg, '\n')) { char *dst = buf2, *end = buf2 + sizeof(buf2) - 2; for (; *msg && dst < end; msg++) { *dst++ = *msg; if (*msg == '\n') *dst++ = '\t'; } while (dst > buf2 && isspace(dst[-1])) dst--; *dst = 0; msg = buf2; } format_time_ms(0, timebuf, sizeof(timebuf)); if (!log_file && cf_logfile && cf_logfile[0]) { log_file = fopen(cf_logfile, "a"); if (log_file) { /* Got the file, disable buffering */ setvbuf(log_file, NULL, _IONBF, 0); } else { /* Unable to open, complain and fail */ fprintf(stderr, "%s %u %s Cannot open logfile: '%s': %s\n", timebuf, pid, log_level_list[0].tag, cf_logfile, strerror_r(errno, ebuf, sizeof(ebuf))); exit(1); } } if (!cf_quiet && level <= cf_stderr_level) fprintf(stderr, "%s %u %s %s\n", timebuf, pid, lev->tag, msg); if (log_file && level <= cf_logfile_level) fprintf(log_file, "%s %u %s %s\n", timebuf, pid, lev->tag, msg); if (cf_syslog && level <= cf_syslog_level) { if (!syslog_started) start_syslog(); syslog(lev->syslog_prio, "%s", msg); } done: if (old_errno != errno) errno = old_errno; } void log_fatal(const char *file, int line, const char *func, 
bool show_perror, void *ctx, const char *fmt, ...) { char buf[2048], ebuf[256]; const char *estr = NULL; int old_errno = 0; va_list ap; if (show_perror) { old_errno = errno; estr = strerror_r(errno, ebuf, sizeof(ebuf)); } va_start(ap, fmt); vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); if (show_perror) { log_generic(LG_FATAL, ctx, "@%s:%d in function %s(): %s: %s [%d]", file, line, func, buf, estr, old_errno); } else { log_generic(LG_FATAL, ctx, "@%s:%d in function %s(): %s", file, line, func, buf); } } #ifdef WIN32 static void win32_eventlog(int level, const char *fmt, ...) { static HANDLE evtHandle = INVALID_HANDLE_VALUE; int elevel; char buf[1024]; const char *strlist[1] = { buf }; va_list ap; va_start(ap, fmt); vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); switch (level) { case LOG_CRIT: case LOG_ERR: elevel = EVENTLOG_ERROR_TYPE; break; case LOG_WARNING: elevel = EVENTLOG_WARNING_TYPE; break; default: elevel = EVENTLOG_INFORMATION_TYPE; } if (evtHandle == INVALID_HANDLE_VALUE) { evtHandle = RegisterEventSource(NULL, cf_syslog_ident); if (evtHandle == NULL || evtHandle == INVALID_HANDLE_VALUE) { evtHandle = INVALID_HANDLE_VALUE; return; } } ReportEvent(evtHandle, elevel, 0, 0, NULL, 1, 0, strlist, NULL); } #endif skytools-3.2.6/lib/usual/event.h0000644000000000000000000001114512166266754013511 0ustar /* * Copyright (c) 2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** * @file * libevent compat. * * This module adds few functions to older libevent versions, * or provides it's own libevent-compatible event loop * for cases where performance and features of full libevent * are not needed. */ #ifndef _USUAL_EVENT_H_ #define _USUAL_EVENT_H_ #include #ifdef HAVE_SYS_TIME_H #include #endif #ifdef HAVE_LIBEVENT /* * Real libevent */ #include #ifndef HAVE_EVENT_BASE_NEW /** Compat: make sure event_base_new() always available */ static inline struct event_base *event_base_new(void) { return event_init(); } #endif #ifndef HAVE_EVENT_LOOPBREAK /** Compat: dummy event_loopbreak for libevent 1.3 */ static inline void event_loopbreak(void) { } #endif #else /* * internal libevent */ #include #include /** * Flags for event_set() / event_assign(): * EV_READ, EV_WRITE, EV_SIGNAL, EV_PERSIST * * Flags given to user callback: * EV_READ, EV_WRITE, EV_SIGNAL, EV_TIMEOUT. */ enum EventFlags { EV_TIMEOUT = 1, EV_READ = 2, EV_WRITE = 4, EV_SIGNAL = 8, EV_PERSIST = 16, }; /** Flags for event_loop() */ enum EventLoopType { EVLOOP_ONCE = 1, EVLOOP_NONBLOCK = 2, }; /** Event context. event_base contents are not open */ struct event_base; /** user callback signature */ typedef void (*uevent_cb_f)(int fd, short flags, void *arg); /** Read fd value from struct event */ #define EVENT_FD(ev) ((ev)->fd) /** Read signal value from struct event */ #define EVENT_SIGNAL(ev) ((ev)->fd) /** * Event structure for internal event loop. * * Although the struct is open, no direct accesses should be done. * Thus also the fields are incompatible with libevent. 
*/ struct event { /* node for fd or signal lists */ struct List node; /* timeout info */ usec_t timeout_val; int timeout_idx; /* back-pointer into pollfd list */ int ev_idx; /* event base it is attached to */ struct event_base *base; /* user callback */ uevent_cb_f cb_func; void *cb_arg; /* fd or signal */ int fd; /* both user and internal flags */ short flags; }; struct event_base *event_init(void) _MUSTCHECK; struct event_base *event_base_new(void) _MUSTCHECK; void event_base_free(struct event_base *base); void event_set(struct event *ev, int fd, short flags, uevent_cb_f cb, void *arg); int event_loop(int loop_flags) _MUSTCHECK; int event_loopbreak(void); int event_add(struct event *ev, struct timeval *timeout) _MUSTCHECK; int event_del(struct event *ev); void event_assign(struct event *ev, struct event_base *base, int fd, short flags, uevent_cb_f cb, void *cb_arg); int event_base_loop(struct event_base *base, int loop_flags) _MUSTCHECK; int event_base_loopbreak(struct event_base *base); #define evtimer_set(ev, cb, arg) event_set(ev, -1, 0, cb, arg) #define evtimer_add(ev, tv) event_add(ev, tv) #define evtimer_del(ev) event_del(ev) #define signal_set(ev, sig, cb, arg) event_set(ev, sig, EV_SIGNAL | EV_PERSIST, cb, arg) #define signal_add(ev, tv) event_add(ev, tv) #define signal_del(ev) event_del(ev) /* random compat */ int event_once(int fd, short flags, uevent_cb_f cb_func, void *cb_arg, struct timeval *timeout); int event_base_once(struct event_base *base, int fd, short flags, uevent_cb_f cb_func, void *cb_arg, struct timeval *timeout); int event_loopexit(struct timeval *timeout); int event_base_loopexit(struct event_base *base, struct timeval *timeout); int event_base_set(struct event_base *base, struct event *ev); const char *event_get_version(void); const char *event_get_method(void); /* pointless compat */ #define event_dispatch() event_loop(0) #define event_base_dispatch(base) event_base_loop(base, 0) #define event_initialized(ev) is_event_active(ev) 
#define signal_initialized(ev) is_event_active(ev) #define evtimer_initialized(ev) is_event_active(ev) int is_event_active(struct event *ev); #endif /* internal libevent */ #endif /* _USUAL_EVENT_H_ */ skytools-3.2.6/lib/usual/pgutil_kwlookup.g0000644000000000000000000000242512166266754015627 0ustar /* gperf header for kwlookup */ %language=ANSI-C %readonly-tables %pic %enum %define lookup-function-name pg_keyword_lookup_real %define hash-function-name pg_keyword_lookup_hash %define string-pool-name pgkw %% all analyse analyze and any array as asc asymmetric authorization between bigint binary bit boolean both case cast char character check coalesce collate column concurrently constraint create cross current_catalog current_date current_role current_schema current_time current_timestamp current_user dec decimal default deferrable desc distinct do else end except exists extract false fetch float for foreign freeze from full grant greatest group having ilike in initially inner inout int integer intersect interval into is isnull join leading least left like limit localtime localtimestamp national natural nchar new none not notnull null nullif numeric off offset old on only or order out outer over overlaps overlay placing position precision primary real references returning right row select session_user setof similar smallint some substring symmetric table then time timestamp to trailing treat trim true union unique user using values varchar variadic verbose when where window with xmlattributes xmlconcat xmlelement xmlexists xmlforest xmlparse xmlpi xmlroot xmlserialize skytools-3.2.6/lib/usual/utf8.c0000644000000000000000000000651212166266754013253 0ustar /* * Low-level UTF8 handling. * * Copyright (c) 2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. 
* * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #define u8head(c, mask) (((c) & (mask | (mask >> 1))) == mask) #define u8tail(c) u8head(c, 0x80) /* * conservative utf8 decoder * * if invalid char, advance src pointer by one and return * negative byte value. this can be ignored or replaced. */ int utf8_get_char(const char **src_p, const char *_srcend) { uint32_t c; const uint8_t *srcend = (uint8_t *)_srcend; const uint8_t *p = (uint8_t *)(*src_p); /* * 0xxx xxxx -> len=1 * 10xx xxxx -> tail byte * 110x xxxx -> len=2 * 1110 xxxx -> len=3 * 1111 0xxx -> len=4 */ if (p[0] < 0x80) { c = *p++; } else if (u8head(p[0], 0xC0)) { if (p + 2 > srcend) goto eos; if (!u8tail(p[1])) goto bad_enc; c = ((p[0] & 0x1F) << 6) | (p[1] & 0x3F); if (c < 0x80) goto bad_enc; p += 2; } else if (u8head(p[0], 0xE0)) { if (p + 3 > srcend) goto eos; if (!u8tail(p[1]) || !u8tail(p[2])) goto bad_enc; c = ((p[0] & 0x0F) << 12) | ((p[1] & 0x3F) << 6) | (p[2] & 0x3F); if ((c < 0x800) || ((c & 0xF800) == 0xD800)) goto bad_enc; p += 3; } else if (u8head(p[0], 0xF0)) { if (p + 4 > srcend) goto eos; if (!u8tail(p[1]) || !u8tail(p[2]) || !u8tail(p[3])) goto bad_enc; c = ((p[0] & 0x07) << 18) | ((p[1] & 0x3F) << 12) | ((p[2] & 0x3F) << 6) | (p[3] & 0x3F); if (c < 0x10000 || c > 0x10FFFF) goto bad_enc; p += 4; } else { goto bad_enc; } *src_p = (char *)p; return c; bad_enc: eos: c = p[0]; *src_p = (char *)p + 1; return -(int)c; } /* encode one char - skip invalid ones */ bool utf8_put_char(unsigned int c, char **dst_p, const char *dstend) { char *dst 
= *dst_p; if (c < 0x80) { if (dst + 1 > dstend) goto no_room; *dst++ = c; } else if (c < 0x800) { if (dst + 2 > dstend) goto no_room; *dst++ = 0xC0 | (c >> 6); *dst++ = 0x80 | (c & 0x3F); } else if (c < 0x10000) { if (dst + 3 > dstend) goto no_room; if (c < 0xD800 || c > 0xDFFF) { *dst++ = 0xE0 | (c >> 12); *dst++ = 0x80 | ((c >> 6) & 0x3F); *dst++ = 0x80 | (c & 0x3F); } } else if (c <= 0x10FFFF) { if (dst + 4 > dstend) goto no_room; *dst++ = 0xF0 | (c >> 18); *dst++ = 0x80 | ((c >> 12) & 0x3F); *dst++ = 0x80 | ((c >> 6) & 0x3F); *dst++ = 0x80 | (c & 0x3F); } *dst_p = dst; return true; no_room: return false; } int utf8_char_size(unsigned int c) { if (c < 0x80) return 1; if (c < 0x800) return 2; if (c < 0x10000) return 3; return 4; } int utf8_seq_size(unsigned char b) { if (b < 0x80) return 1; if (b < 0xC2) return 0; if (b < 0xE0) return 2; if (b < 0xF0) return 3; if (b < 0xF5) return 4; return 0; } skytools-3.2.6/lib/usual/slab.h0000644000000000000000000000447312166266754013317 0ustar /* * Primitive slab allocator. * * Copyright (c) 2007-2009 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** * @file * * Slab allocator for same-size objects. * * Basic behaviour: * - On each alloc initializer is called. 
* - if init func is not given, memset() is done * - init func gets either zeroed obj or old obj from _free(). * 'struct List' on obj start is non-zero. * * ATM custom 'align' larger than malloc() alignment does not work. */ #ifndef _USUAL_SLAB_H_ #define _USUAL_SLAB_H_ #include /** Reference to main */ struct Slab; /** Signature for object init function */ typedef void (*slab_init_fn)(void *obj); /** Create new slab context for specific size */ struct Slab *slab_create(const char *name, unsigned obj_size, unsigned align, slab_init_fn init_func, CxMem *cx); /** Free whole context */ void slab_destroy(struct Slab *slab); /** Allocate single object from slab cache */ void *slab_alloc(struct Slab *slab) _MALLOC _MUSTCHECK; /** Release single object back */ void slab_free(struct Slab *slab, void *obj); /** Return sum of free and used objects */ int slab_total_count(const struct Slab *slab); /** Return number of free objects in cache */ int slab_free_count(const struct Slab *slab); /** Return number of used objects */ int slab_active_count(const struct Slab *slab); /** Signature for stat info callback */ typedef void (*slab_stat_fn)(void *arg, const char *slab_name, unsigned size, unsigned free, unsigned total); /** Run stat info callback on all slabs */ void slab_stats(slab_stat_fn cb_func, void *cb_arg); #endif skytools-3.2.6/lib/usual/fileutil.h0000644000000000000000000000332112166266754014202 0ustar /* * Copyright (c) 2007-2009 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** * @file * * File access utils. */ #ifndef _USUAL_FILEUTIL_H_ #define _USUAL_FILEUTIL_H_ #include #include /** Info about mapped file */ struct MappedFile { int fd; unsigned len; void *ptr; }; /** Signature for per-line callback */ typedef bool (*procline_cb)(void *arg, const char *line, ssize_t len); /** Read file into memory */ void *load_file(const char *fn, size_t *len_p); /** Loop over lines in file */ bool foreach_line(const char *fn, procline_cb proc_line, void *arg); /** Get file size */ ssize_t file_size(const char *fn); /** Map file into memory */ int map_file(struct MappedFile *m, const char *fname, int rw) _MUSTCHECK; /** Unmap previously mapped file */ void unmap_file(struct MappedFile *m); #if !defined(HAVE_GETLINE) #define getline(a,b,c) compat_getline(a,b,c) /** * Compat: Read line from file */ int getline(char **line_p, size_t *size_p, void *f); #endif #endif skytools-3.2.6/lib/usual/list.c0000644000000000000000000000373312166266754013342 0ustar /* * Circular doubly linked list implementation. * * Copyright (c) 2010 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include /* merge 2 ordered arrays into one */ static struct List *merge(list_cmp_f cmp_func, struct List *p, struct List *q) { struct List res[1], *tail = res, *e; while (p && q) { if (cmp_func(p, q) <= 0) { e = p; p = p->next; } else { e = q; q = q->next; } tail->next = e; tail = e; } tail->next = p ? p : q; return res->next; } /* * non-recursive merge sort * * uses singly-linked NULL-terminated arrays internally. */ void list_sort(struct List *list, list_cmp_f cmp_func) { int i, top = 0; struct List *p; struct List *stack[64]; if (list_empty(list)) return; /* merge small sorted fragments into larger ones */ while (list->next != list) { p = list->next; list->next = p->next; p->next = NULL; for (i = 0; (i < top) && stack[i]; i++) { p = merge(cmp_func, stack[i], p); stack[i] = NULL; } stack[i] = p; if (i == top) top++; } /* merge remaining fragments */ for (p = NULL, i = 0; i < top; i++) p = merge(cmp_func, stack[i], p); /* restore proper List */ list->next = p; for (p = list; p->next; p = p->next) p->next->prev = p; list->prev = p; p->next = list; } skytools-3.2.6/lib/usual/wchar.h0000644000000000000000000000242512166266754013475 0ustar /* * wchar.h - wchar_t utilities. * * Copyright (c) 2012 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef _USUAL_WCHAR_H_ #define _USUAL_WCHAR_H_ #include #include #include wchar_t *mbstr_decode(const char *str, int str_len, int *wlen_p, wchar_t *wbuf, int wbuf_len, bool allow_invalid); wctype_t wctype_wcsn(const wchar_t *name, unsigned int namelen); #ifndef HAVE_MBSNRTOWCS #define mbsnrtowcs(a,b,c,d,e) usual_mbsnrtowcs(a,b,c,d,e) size_t mbsnrtowcs(wchar_t *dst, const char **src_p, size_t srclen, size_t dstlen, mbstate_t *ps); #endif #endif skytools-3.2.6/lib/usual/pgsocket.h0000644000000000000000000000572412166266754014215 0ustar /* * Copyright (c) 2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** @file * * Async Postgres connection framework. */ #ifndef _USUAL_PGSOCKET_H_ #define _USUAL_PGSOCKET_H_ #include #include /** * Event types reported to user handler function. 
*/ enum PgEvent { /** Connection establishing finished */ PGS_CONNECT_OK, /** Connection establishing failed */ PGS_CONNECT_FAILED, /** Got result from query either resultset or DB error */ PGS_RESULT_OK, /** Query execution failed */ PGS_RESULT_BAD, /** Wakeup from timed sleep */ PGS_TIMEOUT, }; struct PgSocket; struct event_base; typedef void (*pgs_handler_f)(struct PgSocket *pgs, void *arg, enum PgEvent dbev, PGresult *res); /** Create PgSocket. * * It does not launch connection yet, use \ref pgs_connect() for that. * * @param connstr libpq connect string * @param fn callback function for event handling * @param arg extra context for callback * @return Initialized PgSocket structure */ struct PgSocket *pgs_create(const char *connstr, pgs_handler_f fn, void *arg); /** Release PgSocket */ void pgs_free(struct PgSocket *db); /** Change the event base for PgSocket */ void pgs_set_event_base(struct PgSocket *pgs, struct event_base *base); /** Set connection lifetime (in seconds) */ void pgs_set_lifetime(struct PgSocket *pgs, double lifetime); /** Launch connection */ void pgs_connect(struct PgSocket *db); /** Drop connection */ void pgs_disconnect(struct PgSocket *db); /** Send simple query */ void pgs_send_query_simple(struct PgSocket *db, const char *query); /** Send extended query, args from varargs */ void pgs_send_query_params(struct PgSocket *db, const char *query, int nargs, ...); /** Send extended query, args from list */ void pgs_send_query_params_list(struct PgSocket *db, const char *query, int nargs, const char *argv[]); /** Ignore the connection for specified time */ void pgs_sleep(struct PgSocket *db, double timeout); /** Disconnect, sleep, reconnect */ void pgs_reconnect(struct PgSocket *db, double timeout); /** Does PgSocket have established connection */ int pgs_connection_valid(struct PgSocket *db); /** Return underlying Postgres connection */ PGconn *pgs_get_connection(struct PgSocket *db); bool pgs_waiting_for_reply(struct PgSocket *db); #endif 
skytools-3.2.6/lib/usual/config_msvc.h0000644000000000000000000000506112166266754014665 0ustar /* Define to 1 if you have the `event_base_new' function. */ #define HAVE_EVENT_BASE_NEW 1 /* Define to 1 if you have the `event_loopbreak' function. */ #define HAVE_EVENT_LOOPBREAK 1 /* Define to 1 if you have the header file. */ #define HAVE_MALLOC_H 1 /* Define to 1 if you have the header file. */ #define HAVE_MEMORY_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STDINT_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STDLIB_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STRING_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_STAT_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_TYPES_H 1 /* Define to the address where bug reports for this package should be sent. */ #define PACKAGE_BUGREPORT "https://libusual.github.com" /* Define to the full name of this package. */ #define PACKAGE_NAME "libusual" /* Define to the full name and version of this package. */ #define PACKAGE_STRING "libusual 0.1" /* Define to the one symbol short name of this package. */ #define PACKAGE_TARNAME "libusual" /* Define to the home page for this package. */ #define PACKAGE_URL "" /* Define to the version of this package. */ #define PACKAGE_VERSION "0.1" /* Define to 1 if you have the ANSI C header files. */ #define STDC_HEADERS 1 /* Define to request cleaner win32 headers. */ #define WIN32_LEAN_AND_MEAN 1 /* Define to max win32 API version (0x0501=XP). */ //#define WINVER 0x0501 #define WINVER 0x0600 /* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most significant byte first (like Motorola and SPARC, unlike Intel). */ #if defined AC_APPLE_UNIVERSAL_BUILD # if defined __BIG_ENDIAN__ # define WORDS_BIGENDIAN 1 # endif #else # ifndef WORDS_BIGENDIAN /* # undef WORDS_BIGENDIAN */ # endif #endif /* Define to `int' if doesn't define. 
*/ #define gid_t int /* Define to `__inline__' or `__inline' if that's what the C compiler calls it, or to nothing if 'inline' is not supported under any name. */ #ifndef __cplusplus #define inline __inline #endif /* Define to `int' if does not define. */ #define pid_t int /* Define to the equivalent of the C99 'restrict' keyword, or to nothing if this is not supported. Do not define if restrict is supported directly. */ #ifndef restrict #define restrict #endif /* Define to `int' if doesn't define. */ #define uid_t int #define _CRT_SECURE_NO_WARNINGS 1 #ifndef WIN32 #define WIN32 1 #endif skytools-3.2.6/lib/usual/daemon.c0000644000000000000000000001076312166266754013633 0ustar /* * Daemonization & pidfile handling. * * Copyright (c) 2007-2009 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include #include #include /* * pidfile management. */ static char *g_pidfile; static void remove_pidfile(void) { if (!g_pidfile) return; unlink(g_pidfile); free(g_pidfile); g_pidfile = NULL; } /* * Reads pid from pidfile and sends a signal to it. * * true - signaling was successful. * false - ENOENT / ESRCH * * fatal() otherwise. 
/*
 * Read the pid stored in 'pidfile' and send it signal 'sig'.
 *
 * Returns true when the signal was delivered, false when the pidfile
 * does not exist or no such process is alive (ENOENT / ESRCH).
 * EINTR restarts the whole stat/read/kill sequence; any other error
 * is fatal.
 */
bool signal_pidfile(const char *pidfile, int sig)
{
	char buf[128 + 1];
	struct stat st;
	pid_t pid = 0;
	int fd, res;

	/* no pidfile configured -> nothing to signal */
	if (!pidfile || !pidfile[0])
		return false;

intr_loop:
	/* check if pidfile exists */
	if (stat(pidfile, &st) < 0)
		goto fail;

	/* read old pid */
	fd = open(pidfile, O_RDONLY);
	if (fd < 0)
		goto fail;
	res = read(fd, buf, sizeof(buf) - 1);
	close(fd);
	if (res <= 0)
		goto fail;

	/* parse pid; errno cleared first so a strtoul failure is detectable */
	buf[res] = 0;
	errno = 0;
	pid = strtoul(buf, NULL, 10);
	if (errno) {
		/* should we panic, or say no such process exists? */
		if (0)
			errno = ESRCH;
		goto fail;
	}

	/* send the signal */
	res = kill(pid, sig);
	if (res == 0)
		return true;
fail:
	/* decide error seriousness */
	if (errno == EINTR)
		goto intr_loop;
	if (errno == ENOENT || errno == ESRCH)
		return false;
	fatal_perror("signal_pidfile: Unexpected error");
	/* NOTE(review): assumes fatal_perror() does not return — confirm */
}
/*
 * Handle pidfile and daemonization.
 *
 * If pidfile is given, check if old process is running; the pidfile is
 * written twice — once before forking and once after — so problems are
 * still visible to the launching user.
 *
 * If going background is required, require non-empty pidfile
 * and logfile, then double-fork into a fresh session and write
 * the final pid.
 */
void daemonize(const char *pidfile, bool go_background)
{
	int pid, fd;

	if (pidfile && pidfile[0]) {
		check_pidfile(pidfile);
		/* write pidfile twice, to be able to show problems to user */
		write_pidfile(pidfile, true);
	} else if (go_background)
		fatal("daemon needs pidfile configured");

	if (!go_background)
		return;

	/* a detached daemon would otherwise be unreachable and silent */
	if ((!cf_logfile || !cf_logfile[0]) && !cf_syslog)
		fatal("daemon needs logging configured");

	/* send stdin, stdout, stderr to /dev/null */
	fd = open("/dev/null", O_RDWR);
	if (fd < 0)
		fatal_perror("/dev/null");
	dup2(fd, 0);
	dup2(fd, 1);
	dup2(fd, 2);
	if (fd > 2)
		close(fd);

	/* fork new process; parent exits immediately */
	pid = fork();
	if (pid < 0)
		fatal_perror("fork");
	if (pid > 0)
		_exit(0);

	/* create new session */
	pid = setsid();
	if (pid < 0)
		fatal_perror("setsid");

	/* fork again to avoid being session leader */
	pid = fork();
	if (pid < 0)
		fatal_perror("fork");
	if (pid > 0)
		_exit(0);

	/* record the pid of the final daemon process */
	write_pidfile(pidfile, false);
}
* * Provides wrappers that make sure the functions work on 'char' values. * * @note * POSIX requires that these functions accept EOF/-1 in addition * to ordinary byte values. That means when working on 'char', * the functions cannot differetiate between 0xFF and EOF. * As no code should give EOF to functions and no code * should depend whether 0xFF is labeled ispunct() or not, * it seems no worthwhile to fix it. */ #ifndef _USUAL_CTYPE_H_ #define _USUAL_CTYPE_H_ #include #include #ifndef isblank #define isblank usual_isblank static inline int isblank(int c) { return (c == ' ') || (c == '\t'); } #endif /* keep right signature, cast to uchar internally */ #define _WRAP_CTYPE_FN(name) \ static inline int safe_ ## name (int c) { \ return name((unsigned char)(c)); \ } _WRAP_CTYPE_FN(isalnum) #undef isalnum /** Safe isalnum */ #define isalnum safe_isalnum _WRAP_CTYPE_FN(isalpha) #undef isalpha /** Safe isalpha */ #define isalpha safe_isalpha _WRAP_CTYPE_FN(isascii) #undef isascii /** Safe isascii */ #define isascii safe_isascii _WRAP_CTYPE_FN(isblank) #undef isblank /** Safe isblank */ #define isblank safe_isblank _WRAP_CTYPE_FN(iscntrl) #undef iscntrl /** Safe iscntrl */ #define iscntrl safe_iscntrl _WRAP_CTYPE_FN(isdigit) #undef isdigit /** Safe isdigit */ #define isdigit safe_isdigit _WRAP_CTYPE_FN(isgraph) #undef isgraph /** Safe isgraph */ #define isgraph safe_isgraph _WRAP_CTYPE_FN(islower) #undef islower /** Safe islower */ #define islower safe_islower _WRAP_CTYPE_FN(isprint) #undef isprint /** Safe isprint */ #define isprint safe_isprint _WRAP_CTYPE_FN(ispunct) #undef ispunct /** Safe ispunct */ #define ispunct safe_ispunct _WRAP_CTYPE_FN(isspace) #undef isspace /** Safe isspace */ #define isspace safe_isspace _WRAP_CTYPE_FN(isupper) #undef isupper /** Safe isupper */ #define isupper safe_isupper _WRAP_CTYPE_FN(isxdigit) #undef isxdigit /** Safe isxdigit */ #define isxdigit safe_isxdigit _WRAP_CTYPE_FN(tolower) #undef tolower /** Safe tolower */ #define tolower 
/* Binary heap over opaque element pointers; ordering via user callback. */
struct Heap {
	void **data;		/* heap-ordered array of element pointers */
	unsigned allocated;	/* capacity of data[] in slots */
	unsigned used;		/* number of slots currently filled */
	heap_is_better_f is_better;	/* callback: should arg1 sit above arg2? */
	heap_save_pos_f save_pos;	/* optional: notified of each element's slot */
	CxMem *cx;		/* allocation context for data[] and the struct */
};
/*
 * Low-level operations.
 */

/* Index of the parent of heap slot i (root lives in slot 0). */
static unsigned get_parent(unsigned i)
{
	return (i - 1) / 2;
}

/* Index of child child_nr (0 or 1) of heap slot i. */
static unsigned get_child(unsigned i, unsigned child_nr)
{
	return 2*i + 1 + child_nr;
}

/* Ask user callback whether slot i1's element should sit above i2's. */
static bool is_better(struct Heap *h, unsigned i1, unsigned i2)
{
	return h->is_better(h->data[i1], h->data[i2]);
}

/* Store ptr in slot i, notifying the position callback when set. */
static void set(struct Heap *h, unsigned i, void *ptr)
{
	h->data[i] = ptr;
	if (h->save_pos)
		h->save_pos(ptr, i);
}

/* Exchange the elements in slots i1 and i2. */
static void swap(struct Heap *h, unsigned i1, unsigned i2)
{
	void *tmp = h->data[i1];
	set(h, i1, h->data[i2]);
	set(h, i2, tmp);
}

/* Move slot i toward the root while it is better than its parent. */
static void bubble_up(struct Heap *h, unsigned i)
{
	unsigned p;
	while (i > 0) {
		p = get_parent(i);
		if (!is_better(h, i, p))
			break;
		swap(h, i, p);
		i = p;
	}
}

/* Move slot i away from the root while the better child beats it. */
static void bubble_down(struct Heap *h, unsigned i)
{
	unsigned c = get_child(i, 0);
	while (c < h->used) {
		/* pick the better of the two children */
		if (c + 1 < h->used) {
			if (is_better(h, c + 1, c))
				c = c + 1;
		}
		if (!is_better(h, c, i))
			break;
		swap(h, i, c);
		i = c;
		c = get_child(i, 0);
	}
}

/* Restore heap order around slot pos after its element changed. */
static void rebalance(struct Heap *h, unsigned pos)
{
	if (pos == 0) {
		bubble_down(h, pos);
	} else if (pos == h->used - 1) {
		bubble_up(h, pos);
	} else if (is_better(h, pos, get_parent(pos))) {
		bubble_up(h, pos);
	} else {
		bubble_down(h, pos);
	}
}
h->data[0] : NULL; } bool heap_push(struct Heap *h, void *ptr) { unsigned pos; if (h->used >= h->allocated) { if (!heap_reserve(h, 1)) return false; } pos = h->used++; set(h, pos, ptr); bubble_up(h, pos); return true; } void *heap_remove(struct Heap *h, unsigned pos) { unsigned last; void *obj; if (pos >= h->used) return NULL; obj = h->data[pos]; last = --h->used; if (pos < last) { set(h, pos, h->data[last]); rebalance(h, pos); } h->data[last] = NULL; return obj; } void *heap_pop(struct Heap *h) { return heap_remove(h, 0); } unsigned heap_size(struct Heap *h) { return h->used; } void *heap_get_obj(struct Heap *h, unsigned pos) { if (pos < h->used) return h->data[pos]; return NULL; } skytools-3.2.6/lib/usual/wchar.c0000644000000000000000000000652212166266754013472 0ustar /* * wchar utility functions. * * Copyright (c) 2012 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
/*
 * Look up a wctype property class named by a non-NUL-terminated
 * wide string.  Returns (wctype_t)0 when the name is too long or
 * contains characters outside the printable ASCII range.
 */
wctype_t wctype_wcsn(const wchar_t *name, unsigned int namelen)
{
	char ascii[10];
	unsigned int pos = 0;

	if (namelen >= sizeof(ascii))
		return (wctype_t)0;
	while (pos < namelen) {
		wchar_t wc = name[pos];
		if (wc < 0x20 || wc > 127)
			return (wctype_t)0;
		ascii[pos++] = (char)wc;
	}
	ascii[pos] = '\0';
	return wctype(ascii);
}
dstlen) { /* dst is full */ break; } clen = mbrtowc(w, s, s_end - s, ps); if (clen > 0) { /* proper character */ if (w) w++; count++; s += clen; } else if (clen < 0) { /* invalid encoding */ *src_p = s; return (size_t)(-1); } else { /* end of string */ if (w) *w = 0; *src_p = NULL; return count; } } /* end due to srclen */ *src_p = s; return count; } #endif skytools-3.2.6/lib/usual/aatree.c0000644000000000000000000001644012166266754013627 0ustar /* * AA-Tree - Binary tree with embeddable nodes. * * Copyright (c) 2007-2009 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* * Self-balancing binary tree. * * Here is an implementation of AA-tree (Arne Andersson tree) * which is simplification of Red-Black tree. * * Red-black tree has following properties that must be kept: * 1. A node is either red or black. * 2. The root is black. * 3. All leaves (NIL nodes) are black. * 4. Both childen of red node are black. * 5. Every path from root to leaf contains same number of black nodes. * * AA-tree adds additional property: * 6. Red node can exist only as a right node. * * Red-black tree properties quarantee that the longest path is max 2x longer * than shortest path (B-R-B-R-B-R-B vs. B-B-B-B) thus the tree will be roughly * balanced. 
Also it has good worst-case guarantees for insertion and deletion, * which makes it good tool for real-time applications. * * AA-tree removes most special cases from RB-tree, thus making resulting * code lot simpler. It requires slightly more rotations when inserting * and deleting but also keeps the tree more balanced. */ #include #include /* for NULL */ typedef struct AATree Tree; typedef struct AANode Node; /* * NIL node */ #define NIL ((struct AANode *)&_nil) static const struct AANode _nil = { NIL, NIL, 0 }; /* * Rebalancing. AA-tree needs only 2 operations * to keep the tree balanced. */ /* * Fix red on left. * * X Y * / --> \ * Y X * \ / * a a */ static inline Node * skew(Node *x) { Node *y = x->left; if (x->level == y->level && x != NIL) { x->left = y->right; y->right = x; return y; } return x; } /* * Fix 2 reds on right. * * X Y * \ / \ * Y --> X Z * / \ \ * a Z a */ static inline Node * split(Node *x) { Node *y = x->right; if (x->level == y->right->level && x != NIL) { x->right = y->left; y->left = x; y->level++; return y; } return x; } /* insert is easy */ static Node *rebalance_on_insert(Node *current) { return split(skew(current)); } /* remove is bit more tricky */ static Node *rebalance_on_remove(Node *current) { /* * Removal can create a gap in levels, * fix it by lowering current->level. 
/*
 * Recursive insertion of 'node' keyed by 'value'.
 *
 * Returns the (possibly changed) root of this subtree after AA
 * rebalancing.  If an equal key already exists, 'node' is not linked
 * in and the subtree is returned unchanged.
 */
static Node *
insert_sub(Tree *tree, Node *current, uintptr_t value, Node *node)
{
	int cmp;

	if (current == NIL) {
		/*
		 * Init node as late as possible, to avoid corrupting
		 * the tree in case it is already added.
		 */
		node->left = node->right = NIL;
		node->level = 1;
		tree->count++;
		return node;
	}

	/* recursive insert */
	cmp = tree->node_cmp(value, current);
	if (cmp > 0)
		current->right = insert_sub(tree, current->right, value, node);
	else if (cmp < 0)
		current->left = insert_sub(tree, current->left, value, node);
	else
		/* already exists? */
		return current;
	return rebalance_on_insert(current);
}

/* Public entry point: insert 'node' into 'tree' under key 'value'. */
void aatree_insert(Tree *tree, uintptr_t value, Node *node)
{
	tree->root = insert_sub(tree, tree->root, value, node);
}
It will result in * less tree operations in the long run, */ old->right = steal_leftmost(tree, old->right, &new); /* take old node's place */ *new = *old; } /* cleanup for old node */ if (tree->release_cb) tree->release_cb(old, tree); tree->count--; return new; } static Node *remove_sub(Tree *tree, Node *current, uintptr_t value) { int cmp; /* not found? */ if (current == NIL) return current; cmp = tree->node_cmp(value, current); if (cmp > 0) current->right = remove_sub(tree, current->right, value); else if (cmp < 0) current->left = remove_sub(tree, current->left, value); else current = drop_this_node(tree, current); return rebalance_on_remove(current); } void aatree_remove(Tree *tree, uintptr_t value) { tree->root = remove_sub(tree, tree->root, value); } /* * Walking all nodes */ static void walk_sub(Node *current, enum AATreeWalkType wtype, aatree_walker_f walker, void *arg) { if (current == NIL) return; switch (wtype) { case AA_WALK_IN_ORDER: walk_sub(current->left, wtype, walker, arg); walker(current, arg); walk_sub(current->right, wtype, walker, arg); break; case AA_WALK_POST_ORDER: walk_sub(current->left, wtype, walker, arg); walk_sub(current->right, wtype, walker, arg); walker(current, arg); break; case AA_WALK_PRE_ORDER: walker(current, arg); walk_sub(current->left, wtype, walker, arg); walk_sub(current->right, wtype, walker, arg); break; } } /* walk tree in correct order */ void aatree_walk(Tree *tree, enum AATreeWalkType wtype, aatree_walker_f walker, void *arg) { walk_sub(tree->root, wtype, walker, arg); } /* walk tree in bottom-up order, so that walker can destroy the nodes */ void aatree_destroy(Tree *tree) { walk_sub(tree->root, AA_WALK_POST_ORDER, tree->release_cb, tree); /* reset tree */ tree->root = NIL; tree->count = 0; } /* prepare tree */ void aatree_init(Tree *tree, aatree_cmp_f cmpfn, aatree_walker_f release_cb) { tree->root = NIL; tree->count = 0; tree->node_cmp = cmpfn; tree->release_cb = release_cb; } /* * search function */ Node 
*aatree_search(Tree *tree, uintptr_t value) { Node *current = tree->root; while (current != NIL) { int cmp = tree->node_cmp(value, current); if (cmp > 0) current = current->right; else if (cmp < 0) current = current->left; else return current; } return NULL; } skytools-3.2.6/lib/usual/string.c0000644000000000000000000001375012166266754013675 0ustar /* * Random win32 compat. * * Copyright (c) 2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include /* * Dynamic list of strings. 
*/ struct StrList { struct StatList list; CxMem *ca; }; struct StrItem { struct List node; const char *str; }; bool strlist_empty(struct StrList *slist) { return statlist_empty(&slist->list); } bool strlist_append(struct StrList *slist, const char *str) { const char *nstr = NULL; bool ok; if (str) { nstr = cx_strdup(slist->ca, str); if (!nstr) return false; } ok = strlist_append_ref(slist, nstr); if (!ok) cx_free(slist->ca, nstr); return ok; } bool strlist_append_ref(struct StrList *slist, const char *str) { struct StrItem *item = cx_alloc(slist->ca, sizeof(*item)); if (!item) return false; list_init(&item->node); item->str = str; statlist_append(&slist->list, &item->node); return true; } const char *strlist_pop(struct StrList *slist) { struct StrItem *item; struct List *el; const char *str; el = statlist_pop(&slist->list); if (!el) return NULL; item = container_of(el, struct StrItem, node); str = item->str; cx_free(slist->ca, item); return str; } struct StrList *strlist_new(CxMem *ca) { struct StrList *slist = cx_alloc0(ca, sizeof(*slist)); if (!slist) return NULL; statlist_init(&slist->list, "strlist"); slist->ca = ca; return slist; } void strlist_free(struct StrList *slist) { const char *s; if (!slist) return; while (!strlist_empty(slist)) { s = strlist_pop(slist); if (s) cx_free(slist->ca, s); } cx_free(slist->ca, slist); } bool strlist_foreach(const struct StrList *slist, str_cb func, void *arg) { struct List *el; struct StrItem *item; statlist_for_each(el, &slist->list) { item = container_of(el, struct StrItem, node); if (!func(arg, item->str)) return false; } return true; } /* * Parse comma separated words. 
#ifndef HAVE_STRLCPY
/*
 * BSD strlcpy(): copy src into dst of size n, NUL-terminating whenever
 * n > 0.  Returns strlen(src) so callers can detect truncation by
 * comparing the result against n.
 */
size_t strlcpy(char *dst, const char *src, size_t n)
{
	size_t srclen = strlen(src);

	if (n > 0) {
		if (srclen < n) {
			/* fits, including the terminator */
			memcpy(dst, src, srclen + 1);
		} else {
			/* truncate and terminate */
			memcpy(dst, src, n - 1);
			dst[n - 1] = '\0';
		}
	}
	return srclen;
}
#endif
#ifndef HAVE_DIRNAME
/*
 * POSIX-style dirname() that does not modify its argument.
 * The result lives in a static buffer (not reentrant).  Returns NULL
 * with errno = ENAMETOOLONG when the directory part does not fit.
 */
const char *dirname(const char *path)
{
	static char buf[1024];
	const char *slash;
	size_t dirlen;

	if (path == NULL || path[0] == '\0')
		return memcpy(buf, ".", 2);

	/* drop any run of trailing slashes */
	dirlen = strlen(path);
	while (dirlen > 0 && path[dirlen - 1] == '/')
		dirlen--;
	if (dirlen == 0)
		return memcpy(buf, "/", 2);	/* path was all slashes */

	/* locate the separator in front of the last component */
	slash = memrchr(path, '/', dirlen);
	if (slash == NULL)
		return memcpy(buf, ".", 2);	/* no directory part */

	/* strip the slash run between dirname and basename */
	dirlen = (size_t)(slash - path);
	while (dirlen > 0 && path[dirlen - 1] == '/')
		dirlen--;
	if (dirlen == 0)
		return memcpy(buf, "/", 2);

	if (dirlen > sizeof(buf) - 1) {
		errno = ENAMETOOLONG;
		return NULL;
	}
	memcpy(buf, path, dirlen);
	buf[dirlen] = '\0';
	return buf;
}
#endif
*/ #include #include #include #include #include struct MDict { struct CBTree *tree; CxMem *cx; }; struct MDictElem { struct MBuf key; struct MBuf val; }; /* hook for CBTree */ static unsigned int mdict_getkey(void *ctx, void *obj, const void **dst_p) { struct MDictElem *el = obj; *dst_p = mbuf_data(&el->key); return mbuf_written(&el->key); } static bool mdict_free_obj(void *ctx, void *obj) { struct MDictElem *el = obj; struct MDict *dict = ctx; cx_free(dict->cx, mbuf_data(&el->key)); cx_free(dict->cx, mbuf_data(&el->val)); cx_free(dict->cx, el); return true; } struct MDict *mdict_new(CxMem *cx) { struct MDict *dict; dict = cx_alloc(cx, sizeof(struct MDict)); if (!dict) return NULL; dict->cx = cx; dict->tree = cbtree_create(mdict_getkey, mdict_free_obj, dict, cx); if (!dict->tree) { cx_free(cx, dict); return NULL; } return dict; } void mdict_free(struct MDict *dict) { if (dict) { cbtree_destroy(dict->tree); cx_free(dict->cx, dict); } } const struct MBuf *mdict_get_buf(struct MDict *dict, const char *key, unsigned klen) { struct MDictElem *el = cbtree_lookup(dict->tree, key, klen); if (!el) return NULL; return &el->val; } const char *mdict_get_str(struct MDict *dict, const char *key, unsigned klen) { const struct MBuf *val = mdict_get_buf(dict, key, klen); return val ? 
/* Decode one hex digit: returns 0..15, or -1 for a non-hex character. */
static int gethex(char c)
{
	switch (c) {
	case '0': case '1': case '2': case '3': case '4':
	case '5': case '6': case '7': case '8': case '9':
		return c - '0';
	case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		return 10 + (c - 'a');
	case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		return 10 + (c - 'A');
	default:
		return -1;
	}
}
if (c < 0) goto err; s += 3; *d++ = c; } else if (*s == '+') { *d++ = ' '; s++; } else if (*s == '&' || *s == '=') { break; } else { *d++ = *s++; } } *d = 0; *len_p = d - dst; *src_p = s; return dst; err: cx_free(cx, dst); return NULL; } bool mdict_urldecode(struct MDict *dict, const char *str, unsigned len) { const char *s = str; const char *end = s + len; const char *k, *v; unsigned klen, vlen; struct MDictElem *el; while (s < end) { v = NULL; vlen = 0; el = NULL; /* read key */ k = urldec_str(dict->cx, &s, end, &klen); if (!k) goto fail; /* read value */ if (s < end && *s == '=') { s++; v = urldec_str(dict->cx, &s, end, &vlen); if (!v) goto fail; } if (s < end && *s == '&') s++; /* insert value */ el = cbtree_lookup(dict->tree, k, klen); if (el) { cx_free(dict->cx, mbuf_data(&el->val)); mbuf_init_fixed_reader(&el->val, v, vlen); } else { el = cx_alloc(dict->cx, sizeof(*el)); if (!el) goto fail; mbuf_init_fixed_reader(&el->key, k, klen); mbuf_init_fixed_reader(&el->val, v, vlen); if (!cbtree_insert(dict->tree, el)) goto fail; } } return true; fail: if (k) cx_free(dict->cx, k); if (v) cx_free(dict->cx, v); if (el) cx_free(dict->cx, el); return false; } /* * urlencode */ struct UrlEncCtx { struct MBuf *dst; bool is_first; }; static bool urlenc_str(struct MBuf *dst, const struct MBuf *str) { static const char hextbl[] = "0123456789abcdef"; unsigned len = mbuf_written(str); const unsigned char *s = mbuf_data(str); const unsigned char *end = s + len; bool ok; for (; s < end; s++) { if (*s == ' ') { ok = mbuf_write_byte(dst, '+'); } else if ((*s < 128) && isalnum(*s)) { ok = mbuf_write_byte(dst, *s); } else if (*s == '.' 
|| *s == '_') { ok = mbuf_write_byte(dst, *s); } else { ok = mbuf_write_byte(dst, '%'); ok = ok && mbuf_write_byte(dst, hextbl[*s >> 4]); ok = ok && mbuf_write_byte(dst, hextbl[*s & 15]); } if (!ok) return false; } return true; } static bool urlenc_elem(void *arg, const struct MBuf *key, const struct MBuf *val) { struct UrlEncCtx *ctx = arg; bool ok; if (ctx->is_first) { ctx->is_first = false; } else { ok = mbuf_write_byte(ctx->dst, '&'); if (!ok) return false; } ok = urlenc_str(ctx->dst, key); if (!ok) return false; if (mbuf_data(val) != NULL) { ok = mbuf_write_byte(ctx->dst, '='); if (!ok) return false; ok = urlenc_str(ctx->dst, val); if (!ok) return false; } return true; } bool mdict_urlencode(struct MDict *dict, struct MBuf *dst) { struct UrlEncCtx ctx; ctx.is_first = true; ctx.dst = dst; return mdict_walk(dict, urlenc_elem, &ctx); } skytools-3.2.6/lib/usual/strpool.c0000644000000000000000000000557112166266754014073 0ustar /* * Pool for shared strings. * * Copyright (c) 2010 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include /* * Put all strings into cbtree. 
*/ struct StrPool { CxMem *ca; struct CBTree *tree; int count; }; /* pass key info to cbtree */ static unsigned get_key(void *ctx, void *obj, const void **dst_p) { struct PStr *s = obj; *dst_p = s->str; return s->len; } /* free PStr obj */ static bool free_str(void *arg, void *obj) { struct PStr *p = obj; struct StrPool *sp = p->pool; memset(p, 0, offsetof(struct PStr, str) + 1); cx_free(sp->ca, obj); return true; } /* create main structure */ struct StrPool *strpool_create(CxMem *ca) { struct StrPool *sp; sp = cx_alloc(ca, sizeof(*sp)); if (!sp) return NULL; sp->count = 0; sp->ca = ca; sp->tree = cbtree_create(get_key, NULL, NULL, ca); if (!sp->tree) { cx_free(ca, sp); return NULL; } return sp; } /* free main structure */ void strpool_free(struct StrPool *sp) { if (sp) { cbtree_walk(sp->tree, free_str, sp); cbtree_destroy(sp->tree); cx_free(sp->ca, sp); } } /* return total count of strings in pool */ int strpool_total(struct StrPool *sp) { return sp->count; } /* get new reference to str */ struct PStr *strpool_get(struct StrPool *sp, const char *str, int len) { struct PStr *cstr; bool ok; if (len < 0) len = strlen(str); /* search */ cstr = cbtree_lookup(sp->tree, str, len); if (cstr) { cstr->refcnt++; return cstr; } /* create */ cstr = cx_alloc(sp->ca, sizeof(*cstr) + len + 1); if (!cstr) return NULL; cstr->pool = sp; cstr->refcnt = 1; cstr->len = len; memcpy(cstr->str, str, len + 1); /* insert */ ok = cbtree_insert(sp->tree, cstr); if (!ok) { cx_free(sp->ca, cstr); return NULL; } sp->count++; return cstr; } /* add reference */ void strpool_incref(struct PStr *s) { if (s) s->refcnt++; } /* drop reference, free if none left */ void strpool_decref(struct PStr *s) { struct StrPool *sp; if (!s) return; Assert(s->refcnt > 0); s->refcnt--; if (s->refcnt > 0) return; /* remove */ sp = s->pool; sp->count--; cbtree_delete(sp->tree, s->str, s->len); free_str(NULL, s); } skytools-3.2.6/lib/usual/string.h0000644000000000000000000000633412166266754013702 0ustar /* * Copyright 
(c) 2007-2009 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** * \file * Theme include for strings. */ #ifndef _USUAL_STRING_H_ #define _USUAL_STRING_H_ #include #include /** * @name List of strings. * @{ */ /** Callback signature */ typedef bool (*str_cb)(void *arg, const char *s); struct StrList; /** Allocate new string list */ struct StrList *strlist_new(CxMem *ca); /** Free string string */ void strlist_free(struct StrList *slist); /** Check if empty */ bool strlist_empty(struct StrList *slist); /** Append copy of string. */ bool strlist_append(struct StrList *slist, const char *str); /** Append reference, strlist now owns it. */ bool strlist_append_ref(struct StrList *slist, const char *str); /** Call function on each element */ bool strlist_foreach(const struct StrList *slist, str_cb cb_func, void *cb_arg); /** Remove and return first element */ const char *strlist_pop(struct StrList *slist); /* @} */ /** Parse comma-separated elements from string and launch callback for each of them. */ bool parse_word_list(const char *s, str_cb cb_func, void *cb_arg); #ifndef HAVE_STRLCPY #define strlcpy(a,b,c) usual_strlcpy(a,b,c) /** Compat: Safely copy string to fixed-length buffer. 
*/ size_t strlcpy(char *dst, const char *src, size_t n); #endif #ifndef HAVE_STRLCAT #define strlcat(a,b,c) usual_strlcat(a,b,c) /** Compat: Safely append string to fixed-length buffer. */ size_t strlcat(char *dst, const char *src, size_t n); #endif #ifndef HAVE_MEMRCHR #define memrchr(a,b,c) usual_memrchr(a,b,c) /** Compat: find byte in reverse direction */ void *memrchr(const void *s, int c, size_t n); #endif #ifndef HAVE_BASENAME #undef basename #define basename(a) usual_basename(a) /** Compat: Return pointer to last non-path element. Never modifies path, returns either pointer inside path or static buffer. */ const char *basename(const char *path); #endif #ifndef HAVE_DIRNAME #undef dirname #define dirname(a) usual_dirname(a) /** Compat: Return directory part of pathname. Never modifies path, returns either pointer inside path or static buffer. */ const char *dirname(const char *path); #endif /* * strerror, strerror_r */ #ifdef WIN32 const char *win32_strerror(int e); /** Compat: strerror() for win32 */ #define strerror(x) win32_strerror(x) #endif const char *usual_strerror_r(int e, char *dst, size_t dstlen); /** Compat: Provide GNU-style API: const char *strerror_r(int e, char *dst, size_t dstlen) */ #define strerror_r(a,b,c) usual_strerror_r(a,b,c) #endif skytools-3.2.6/lib/usual/socket_ntop.c0000644000000000000000000001235612166266754014720 0ustar /* $OpenBSD: inet_ntop.c,v 1.8 2008/12/09 19:38:38 otto Exp $ */ /* Copyright (c) 1996 by Internet Software Consortium. * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS * ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL INTERNET SOFTWARE
 * CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
 * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
 * SOFTWARE.
 */

#include #include 

#ifndef HAVE_INET_NTOP

#ifndef INADDRSZ
#define INADDRSZ 4
#endif
#ifndef IN6ADDRSZ
#define IN6ADDRSZ 16
#endif
#ifndef INT16SZ
#define INT16SZ 2
#endif

#define u_char uint8_t
#define u_int unsigned int

/*
 * WARNING: Don't even consider trying to compile this on a system where
 * sizeof(int) < 4.  sizeof(int) > 4 is fine; all the world's not a VAX.
 */

static const char *inet_ntop4(const u_char *src, char *dst, int size);
static const char *inet_ntop6(const u_char *src, char *dst, int size);

/* char *
 * inet_ntop(af, src, dst, size)
 *	convert a network format address to presentation format.
 * return:
 *	pointer to presentation format address (`dst'), or NULL (see errno).
 * author:
 *	Paul Vixie, 1996.
 */
const char *
inet_ntop(int af, const void *src, char *dst, int size)
{
	/* negative size would wrap once passed down as a length */
	if (size < 0) {
		errno = ENOSPC;
		return NULL;
	}
	switch (af) {
	case AF_INET:
		return (inet_ntop4(src, dst, size));
	case AF_INET6:
		return (inet_ntop6(src, dst, size));
	default:
		errno = EAFNOSUPPORT;
		return (NULL);
	}
	/* NOTREACHED */
}

/* const char *
 * inet_ntop4(src, dst, size)
 *	format an IPv4 address, more or less like inet_ntoa()
 * return:
 *	`dst' (as a const)
 * notes:
 *	(1) uses no statics
 *	(2) takes a u_char* not an in_addr as input
 * author:
 *	Paul Vixie, 1996.
 */
static const char *
inet_ntop4(const u_char *src, char *dst, int size)
{
	static const char fmt[] = "%u.%u.%u.%u";
	char tmp[sizeof "255.255.255.255"];
	int l;

	/* format into a local buffer first so dst is untouched on failure */
	l = snprintf(tmp, sizeof(tmp), fmt, src[0], src[1], src[2], src[3]);
	if (l <= 0 || l >= size) {
		errno = ENOSPC;
		return (NULL);
	}
	strlcpy(dst, tmp, size);
	return (dst);
}

/* const char *
 * inet_ntop6(src, dst, size)
 *	convert IPv6 binary address into presentation (printable) format
 * author:
 *	Paul Vixie, 1996.
 */
static const char *
inet_ntop6(const u_char *src, char *dst, int size)
{
	/*
	 * Note that int32_t and int16_t need only be "at least" large enough
	 * to contain a value of the specified size.  On some systems, like
	 * Crays, there is no such thing as an integer variable with 16 bits.
	 * Keep this in mind if you think this function should have been coded
	 * to use pointer overlays.  All the world's not a VAX.
	 */
	char tmp[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255"];
	char *tp, *ep;
	struct { int base, len; } best, cur;	/* longest zero run / current run */
	u_int words[IN6ADDRSZ / INT16SZ];
	int i;
	int advance;

	/*
	 * Preprocess:
	 *	Copy the input (bytewise) array into a wordwise array.
	 *	Find the longest run of 0x00's in src[] for :: shorthanding.
	 */
	memset(words, '\0', sizeof words);
	for (i = 0; i < IN6ADDRSZ; i++)
		words[i / 2] |= (src[i] << ((1 - (i % 2)) << 3));	/* big-endian 16-bit groups */
	best.base = best.len = -1;
	cur.base = cur.len = -1;
	for (i = 0; i < (IN6ADDRSZ / INT16SZ); i++) {
		if (words[i] == 0) {
			if (cur.base == -1)
				cur.base = i, cur.len = 1;
			else
				cur.len++;
		} else {
			if (cur.base != -1) {
				if (best.base == -1 || cur.len > best.len)
					best = cur;
				cur.base = -1;
			}
		}
	}
	if (cur.base != -1) {
		if (best.base == -1 || cur.len > best.len)
			best = cur;
	}
	/* a single zero word is not worth "::" */
	if (best.base != -1 && best.len < 2)
		best.base = -1;

	/*
	 * Format the result.
	 */
	tp = tmp;
	ep = tmp + sizeof(tmp);
	for (i = 0; i < (IN6ADDRSZ / INT16SZ) && tp < ep; i++) {
		/* Are we inside the best run of 0x00's? */
		if (best.base != -1 && i >= best.base &&
		    i < (best.base + best.len)) {
			if (i == best.base) {
				if (tp + 1 >= ep)
					return (NULL);
				*tp++ = ':';
			}
			continue;
		}
		/* Are we following an initial run of 0x00s or any real hex? */
		if (i != 0) {
			if (tp + 1 >= ep)
				return (NULL);
			*tp++ = ':';
		}
		/* Is this address an encapsulated IPv4? */
		if (i == 6 && best.base == 0 &&
		    (best.len == 6 || (best.len == 5 && words[5] == 0xffff))) {
			if (!inet_ntop4(src+12, tp, (size_t)(ep - tp)))
				return (NULL);
			tp += strlen(tp);
			break;
		}
		advance = snprintf(tp, ep - tp, "%x", words[i]);
		if (advance <= 0 || advance >= ep - tp)
			return (NULL);
		tp += advance;
	}
	/* Was it a trailing run of 0x00's? */
	if (best.base != -1 && (best.base + best.len) ==
	    (IN6ADDRSZ / INT16SZ)) {
		if (tp + 1 >= ep)
			return (NULL);
		*tp++ = ':';
	}
	if (tp + 1 >= ep)
		return (NULL);
	*tp++ = '\0';

	/*
	 * Check for overflow, copy, and we're done.
	 */
	if ((tp - tmp) > size) {
		errno = ENOSPC;
		return (NULL);
	}
	strlcpy(dst, tmp, size);
	return (dst);
}
#endif
skytools-3.2.6/lib/usual/getopt.c0000644000000000000000000003245212166266754013671 0ustar  /*	$OpenBSD: getopt_long.c,v 1.24 2010/07/22 19:31:53 blambert Exp $	*/
/*	$NetBSD: getopt_long.c,v 1.15 2002/01/31 22:43:40 tv Exp $	*/

/*
 * Copyright (c) 2002 Todd C. Miller
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
* * Sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F39502-99-1-0512. */ /*- * Copyright (c) 2000 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Dieter Baron and Thomas Klausner. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include #ifdef NEED_USUAL_GETOPT #include #include char *optarg; /* argument associated with option */ int opterr = 1; /* if error message should be printed */ int optind = 1; /* index into parent argv vector */ int optopt = '?'; /* character checked for validity */ #define PRINT_ERROR ((opterr) && (*options != ':')) #define FLAG_PERMUTE 0x01 /* permute non-options to the end of argv */ #define FLAG_ALLARGS 0x02 /* treat non-options as args to option "-1" */ #define FLAG_LONGONLY 0x04 /* operate as getopt_long_only */ /* return values */ #define BADCH (int)'?' #define BADARG ((*options == ':') ? (int)':' : (int)'?') #define INORDER (int)1 #define EMSG "" static char *place = EMSG; /* option letter processing */ /* XXX: set optreset to 1 rather than these two */ static int nonopt_start = -1; /* first non option argument (for permute) */ static int nonopt_end = -1; /* first option after non options (for permute) */ /* Error messages */ static const char recargchar[] = "option requires an argument -- %c"; static const char recargstring[] = "option requires an argument -- %s"; static const char ambig[] = "ambiguous option -- %.*s"; static const char noarg[] = "option doesn't take an argument -- %.*s"; static const char illoptchar[] = "unknown option -- %c"; static const char illoptstring[] = "unknown option -- %s"; /* * Compute the greatest common divisor of a and b. */ static int gcd(int a, int b) { int c; c = a % b; while (c != 0) { a = b; b = c; c = a % b; } return (b); } /* * Exchange the block from nonopt_start to nonopt_end with the block * from nonopt_end to opt_end (keeping the same order of arguments * in each block). 
*/ static void permute_args(int panonopt_start, int panonopt_end, int opt_end, char * const *nargv) { int cstart, cyclelen, i, j, ncycle, nnonopts, nopts, pos; char *swap; /* * compute lengths of blocks and number and size of cycles */ nnonopts = panonopt_end - panonopt_start; nopts = opt_end - panonopt_end; ncycle = gcd(nnonopts, nopts); cyclelen = (opt_end - panonopt_start) / ncycle; for (i = 0; i < ncycle; i++) { cstart = panonopt_end+i; pos = cstart; for (j = 0; j < cyclelen; j++) { if (pos >= panonopt_end) pos -= nnonopts; else pos += nopts; swap = nargv[pos]; /* LINTED const cast */ ((char **) nargv)[pos] = nargv[cstart]; /* LINTED const cast */ ((char **)nargv)[cstart] = swap; } } } /* * parse_long_options -- * Parse long options in argc/argv argument vector. * Returns -1 if short_too is set and the option does not match long_options. */ static int parse_long_options(char * const *nargv, const char *options, const struct option *long_options, int *idx, int short_too) { char *current_argv, *has_equal; size_t current_argv_len; int i, match; current_argv = place; match = -1; optind++; if ((has_equal = strchr(current_argv, '=')) != NULL) { /* argument found (--option=arg) */ current_argv_len = has_equal - current_argv; has_equal++; } else current_argv_len = strlen(current_argv); for (i = 0; long_options[i].name; i++) { /* find matching long option */ if (strncmp(current_argv, long_options[i].name, current_argv_len)) continue; if (strlen(long_options[i].name) == current_argv_len) { /* exact match */ match = i; break; } /* * If this is a known short option, don't allow * a partial match of a single character. 
*/ if (short_too && current_argv_len == 1) continue; if (match == -1) /* partial match */ match = i; else { /* ambiguous abbreviation */ if (PRINT_ERROR) warnx(ambig, (int)current_argv_len, current_argv); optopt = 0; return (BADCH); } } if (match != -1) { /* option found */ if (long_options[match].has_arg == no_argument && has_equal) { if (PRINT_ERROR) warnx(noarg, (int)current_argv_len, current_argv); /* * XXX: GNU sets optopt to val regardless of flag */ if (long_options[match].flag == NULL) optopt = long_options[match].val; else optopt = 0; return (BADARG); } if (long_options[match].has_arg == required_argument || long_options[match].has_arg == optional_argument) { if (has_equal) optarg = has_equal; else if (long_options[match].has_arg == required_argument) { /* * optional argument doesn't use next nargv */ optarg = nargv[optind++]; } } if ((long_options[match].has_arg == required_argument) && (optarg == NULL)) { /* * Missing argument; leading ':' indicates no error * should be generated. */ if (PRINT_ERROR) warnx(recargstring, current_argv); /* * XXX: GNU sets optopt to val regardless of flag */ if (long_options[match].flag == NULL) optopt = long_options[match].val; else optopt = 0; --optind; return (BADARG); } } else { /* unknown option */ if (short_too) { --optind; return (-1); } if (PRINT_ERROR) warnx(illoptstring, current_argv); optopt = 0; return (BADCH); } if (idx) *idx = match; if (long_options[match].flag) { *long_options[match].flag = long_options[match].val; return (0); } else return (long_options[match].val); } /* * getopt_internal -- * Parse argc/argv argument vector. Called by user level routines. 
*/ static int getopt_internal(int nargc, char * const *nargv, const char *options, const struct option *long_options, int *idx, int flags) { char *oli; /* option letter list index */ int optchar, short_too; static int posixly_correct = -1; int optreset = 0; if (options == NULL) return (-1); /* * Disable GNU extensions if POSIXLY_CORRECT is set or options * string begins with a '+'. */ if (posixly_correct == -1) posixly_correct = (getenv("POSIXLY_CORRECT") != NULL); if (posixly_correct || *options == '+') flags &= ~FLAG_PERMUTE; else if (*options == '-') flags |= FLAG_ALLARGS; if (*options == '+' || *options == '-') options++; /* * reset if requested */ if (optind == 0) optind = optreset = 1; optarg = NULL; if (optreset) nonopt_start = nonopt_end = -1; start: if (optreset || !*place) { /* update scanning pointer */ optreset = 0; if (optind >= nargc) { /* end of argument vector */ place = EMSG; if (nonopt_end != -1) { /* do permutation, if we have to */ permute_args(nonopt_start, nonopt_end, optind, nargv); optind -= nonopt_end - nonopt_start; } else if (nonopt_start != -1) { /* * If we skipped non-options, set optind * to the first of them. */ optind = nonopt_start; } nonopt_start = nonopt_end = -1; return (-1); } if (*(place = nargv[optind]) != '-' || (place[1] == '\0' && strchr(options, '-') == NULL)) { place = EMSG; /* found non-option */ if (flags & FLAG_ALLARGS) { /* * GNU extension: * return non-option as argument to option 1 */ optarg = nargv[optind++]; return (INORDER); } if (!(flags & FLAG_PERMUTE)) { /* * If no permutation wanted, stop parsing * at first non-option. 
*/ return (-1); } /* do permutation */ if (nonopt_start == -1) nonopt_start = optind; else if (nonopt_end != -1) { permute_args(nonopt_start, nonopt_end, optind, nargv); nonopt_start = optind - (nonopt_end - nonopt_start); nonopt_end = -1; } optind++; /* process next argument */ goto start; } if (nonopt_start != -1 && nonopt_end == -1) nonopt_end = optind; /* * If we have "-" do nothing, if "--" we are done. */ if (place[1] != '\0' && *++place == '-' && place[1] == '\0') { optind++; place = EMSG; /* * We found an option (--), so if we skipped * non-options, we have to permute. */ if (nonopt_end != -1) { permute_args(nonopt_start, nonopt_end, optind, nargv); optind -= nonopt_end - nonopt_start; } nonopt_start = nonopt_end = -1; return (-1); } } /* * Check long options if: * 1) we were passed some * 2) the arg is not just "-" * 3) either the arg starts with -- we are getopt_long_only() */ if (long_options != NULL && place != nargv[optind] && (*place == '-' || (flags & FLAG_LONGONLY))) { short_too = 0; if (*place == '-') place++; /* --foo long option */ else if (*place != ':' && strchr(options, *place) != NULL) short_too = 1; /* could be short option too */ optchar = parse_long_options(nargv, options, long_options, idx, short_too); if (optchar != -1) { place = EMSG; return (optchar); } } if ((optchar = (int)*place++) == (int)':' || (optchar == (int)'-' && *place != '\0') || (oli = strchr(options, optchar)) == NULL) { /* * If the user specified "-" and '-' isn't listed in * options, return -1 (non-option) as per POSIX. * Otherwise, it is an unknown option character (or ':'). 
*/ if (optchar == (int)'-' && *place == '\0') return (-1); if (!*place) ++optind; if (PRINT_ERROR) warnx(illoptchar, optchar); optopt = optchar; return (BADCH); } if (long_options != NULL && optchar == 'W' && oli[1] == ';') { /* -W long-option */ if (*place) /* no space */ /* NOTHING */; else if (++optind >= nargc) { /* no arg */ place = EMSG; if (PRINT_ERROR) warnx(recargchar, optchar); optopt = optchar; return (BADARG); } else /* white space */ place = nargv[optind]; optchar = parse_long_options(nargv, options, long_options, idx, 0); place = EMSG; return (optchar); } if (*++oli != ':') { /* doesn't take argument */ if (!*place) ++optind; } else { /* takes (optional) argument */ optarg = NULL; if (*place) /* no white space */ optarg = place; else if (oli[1] != ':') { /* arg not optional */ if (++optind >= nargc) { /* no arg */ place = EMSG; if (PRINT_ERROR) warnx(recargchar, optchar); optopt = optchar; return (BADARG); } else optarg = nargv[optind]; } place = EMSG; ++optind; } /* dump back option letter */ return (optchar); } /* * getopt -- * Parse argc/argv argument vector. */ int getopt(int nargc, char *nargv[], const char *options) { return getopt_internal(nargc, nargv, options, NULL, NULL, FLAG_PERMUTE); } /* * getopt_long -- * Parse argc/argv argument vector. */ int getopt_long(int nargc, char *nargv[], const char *options, const struct option *long_options, int *idx) { return (getopt_internal(nargc, nargv, options, long_options, idx, FLAG_PERMUTE)); } /* * getopt_long_only -- * Parse argc/argv argument vector. 
*/ int getopt_long_only(int nargc, char *nargv[], const char *options, const struct option *long_options, int *idx) { return (getopt_internal(nargc, nargv, options, long_options, idx, FLAG_PERMUTE|FLAG_LONGONLY)); } #endif /* NEED_USUAL_GETOPT */ skytools-3.2.6/lib/usual/bits.h0000644000000000000000000000745412166266754013341 0ustar /* * Copyright (c) 2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** @file * Bit arithmetics. 
* * - is_power_of_2 * - ffs, ffsl, ffsll * - fls, flsl, flsll * - rol16, rol32, rol64 * - ror16, ror32, ror64 */ #ifndef _USUAL_BITS_H_ #define _USUAL_BITS_H_ #include #include /** Checks if integer has only one bit set */ static inline int is_power_of_2(int n) { return (n > 0) && !(n & (n - 1)); } /* * Single-eval and type-safe rol/ror */ /** Rotate 16-bit int to left */ static inline uint16_t rol16(uint16_t v, int s) { return (v << s) | (v >> (16 - s)); } /** Rotate 32-bit int to left */ static inline uint32_t rol32(uint32_t v, int s) { return (v << s) | (v >> (32 - s)); } /** Rotate 64-bit int to left */ static inline uint64_t rol64(uint64_t v, int s) { return (v << s) | (v >> (64 - s)); } /** Rotate 16-bit int to right */ static inline uint16_t ror16(uint16_t v, int s) { return rol16(v, 16 - s); } /** Rotate 32-bit int to right */ static inline uint32_t ror32(uint32_t v, int s) { return rol32(v, 32 - s); } /** Rotate 64-bit int to right */ static inline uint64_t ror64(uint64_t v, int s) { return rol64(v, 64 - s); } /* * fls(int) * flsl(long) * flsll(long long) * * find MSB bit set, 1-based ofs, 0 if arg == 0 */ #if defined(__GNUC__) && (__GNUC__ >= 4) #define _FLS(sfx, type) \ return (x == 0) ? 
0 : ((8*sizeof(type)) - __builtin_clz ## sfx(x)) #else #define _FLS(sfx, type) \ unsigned type u = x; \ unsigned int bit; \ if (x == 0) return 0; \ /* count from smallest bit, assuming small values */ \ for (bit = 1; u > 1; bit++) u >>= 1; \ return bit #endif #ifndef HAVE_FLS #define fls(x) usual_fls(x) /** Compat: Find last (MSB) set bit, 1-based ofs, 0 if arg == 0 */ static inline int fls(int x) { _FLS(, int); } #endif #ifndef HAVE_FLSL #define flsl(x) usual_flsl(x) /** Compat: Find last (MSB) set bit, 1-based ofs, 0 if arg == 0 */ static inline int flsl(long x) { _FLS(l, long); } #endif #ifndef HAVE_FLSLL #define flsll(x) usual_flsll(x) /** Compat: Find last (MSB) set bit, 1-based ofs, 0 if arg == 0 */ static inline int flsll(long long x) { _FLS(ll, long long); } #endif #undef _FLS /* * ffs(int) * ffsl(long) * ffsll(long long) * * find LSB bit set, 1-based ofs, 0 if arg == 0 */ #if defined(__GNUC__) && (__GNUC__ >= 4) #define _FFS(sfx, type) \ return __builtin_ffs ## sfx((unsigned type)(x)) #else #define _FFS(sfx, type) \ unsigned int bit; \ unsigned type u = x; \ if (!x) return 0; \ /* count from smallest bit, assuming small values */ \ for (bit = 1; !(u & 1); bit++) { \ u >>= 1; \ } \ return bit #endif #ifndef HAVE_FFS #define ffs(x) usual_ffs(x) /** Compat: Find first (LSB) set bit, 1-based ofs, 0 if arg == 0 */ static inline int ffs(int x) { _FFS(, int); } #endif #ifndef HAVE_FFSL #define ffsl(x) usual_ffsl(x) /** Compat: Find first (LSB) set bit, 1-based ofs, 0 if arg == 0 */ static inline int ffsl(long x) { _FFS(l, long); } #endif #ifndef HAVE_FFSLL #define ffsll(x) usual_ffsll(x) /** Compat: Find first (LSB) set bit, 1-based ofs, 0 if arg == 0 */ static inline int ffsll(long long x) { _FFS(ll, long long); } #endif #undef _FFS #endif skytools-3.2.6/lib/usual/err.c0000644000000000000000000000515212166266754013154 0ustar /* * Cmdline error reporting. 
* * Copyright (c) 2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include #ifndef HAVE_SETPROGNAME static const char *progname; #endif #ifndef HAVE_ERR void err(int e, const char *fmt, ...) { char buf[1024], ebuf[256]; va_list ap; int olderrno = errno; if (fmt) { va_start(ap, fmt); vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); errx(e, "%s: %s", buf, strerror_r(olderrno, ebuf, sizeof(ebuf))); } else { errx(e, "%s", strerror_r(olderrno, ebuf, sizeof(ebuf))); } } #endif #ifndef HAVE_ERRX void errx(int e, const char *fmt, ...) { va_list ap; if (progname) fprintf(stderr, "%s: ", progname); if (fmt) { va_start(ap, fmt); vfprintf(stderr, fmt, ap); va_end(ap); } fprintf(stderr, "\n"); exit(e); } #endif #ifndef HAVE_WARN void warn(const char *fmt, ...) { char buf[1024], ebuf[256]; va_list ap; int olderrno = errno; if (fmt) { va_start(ap, fmt); vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); warnx("%s: %s", buf, strerror_r(olderrno, ebuf, sizeof(ebuf))); } else { warnx("%s", strerror_r(olderrno, ebuf, sizeof(ebuf))); } } #endif #ifndef HAVE_WARNX void warnx(const char *fmt, ...) 
{ va_list ap; if (progname) fprintf(stderr, "%s: ", progname); if (fmt) { va_start(ap, fmt); vfprintf(stderr, fmt, ap); va_end(ap); } } #endif #ifndef HAVE_SETPROGNAME void setprogname(const char *s) { const char *ss = strrchr(s, '/'); progname = ss ? (ss + 1) : s; } #endif #ifndef HAVE_GETPROGNAME const char *getprogname(void) { return progname; } #endif void *xmalloc(size_t len) { void *p = malloc(len); if (!p) err(1, "no mem"); return p; } void *xrealloc(void *p, size_t len) { void *p2 = realloc(p, len); if (!p2) err(1, "no mem"); return p2; } char *xstrdup(const char *s) { void *s2 = strdup(s); if (!s2) err(1, "no mem"); return s2; } skytools-3.2.6/lib/usual/list.h0000644000000000000000000000757512166266754013357 0ustar /* * Circular doubly linked list implementation. * * Copyright (c) 2007 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** * @file * Circular doubly linked list. */ #ifndef _USUAL_LIST_H_ #define _USUAL_LIST_H_ #include /** * Structure for both list nodes and heads. * * It is meant to be embedded in parent structure, * which can be acquired with container_of(). */ struct List { /** Pointer to next node or head. */ struct List *next; /** Pointer to previous node or head. 
*/ struct List *prev; }; /** Define and initialize emtpy list head */ #define LIST(var) struct List var = { &var, &var } /** Initialize empty list head. */ static inline void list_init(struct List *list) { list->next = list->prev = list; } /** Is list empty? */ static inline int list_empty(const struct List *list) { return list->next == list; } /** Add item to the start of the list */ static inline struct List *list_prepend(struct List *list, struct List *item) { item->next = list->next; item->prev = list; list->next->prev = item; list->next = item; return item; } /** Add item to the end of the list */ static inline struct List *list_append(struct List *list, struct List *item) { item->next = list; item->prev = list->prev; list->prev->next = item; list->prev = item; return item; } /** Remove item from list */ static inline struct List *list_del(struct List *item) { item->prev->next = item->next; item->next->prev = item->prev; item->next = item->prev = item; return item; } /** Remove first from list and return */ static inline struct List *list_pop(struct List *list) { if (list_empty(list)) return NULL; return list_del(list->next); } /** Get first elem from list */ static inline struct List *list_first(const struct List *list) { if (list_empty(list)) return NULL; return list->next; } /** Get last elem from list */ static inline struct List *list_last(const struct List *list) { if (list_empty(list)) return NULL; return list->prev; } /** Remove first elem from list and return with casting */ #define list_pop_type(list, typ, field) \ (list_empty(list) ? 
NULL \ : container_of(list_del((list)->next), typ, field)) /** Loop over list */ #define list_for_each(item, list) \ for ((item) = (list)->next; \ (item) != (list); \ (item) = (item)->next) /** Loop over list backwards */ #define list_for_each_reverse(item, list) \ for ((item) = (list)->prev; \ (item) != (list); \ (item) = (item)->prev) /** Loop over list and allow removing item */ #define list_for_each_safe(item, list, tmp) \ for ((item) = (list)->next, (tmp) = (list)->next->next; \ (item) != (list); \ (item) = (tmp), (tmp) = (tmp)->next) /** Loop over list backwards and allow removing item */ #define list_for_each_reverse_safe(item, list, tmp) \ for ((item) = (list)->prev, (tmp) = (list)->prev->prev; \ (item) != (list); \ (item) = (tmp), (tmp) = (tmp)->prev) /** Comparator function signature for list_sort() */ typedef int (*list_cmp_f)(const struct List *a, const struct List *b); /** * Sort list. * * This implementation uses stable merge sort which operates in-place. */ void list_sort(struct List *list, list_cmp_f cmp_func); #endif skytools-3.2.6/lib/usual/dlfcn.h0000644000000000000000000000254212166266754013457 0ustar /* * Dynamic library loading. * * Copyright (c) 2007-2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #ifndef _USUAL_DLFCN_H_ #define _USUAL_DLFCN_H_ #ifdef HAVE_DLFCN_H #include #elif defined(_WIN32) #define dlopen(a,b) usual_dlopen(a,b) #define dlsym(a,b) usual_dlsym(a,b) #define dlclose(a) usual_dlclose(a) #define dlerror(...) usual_dlerror(__VA_ARGS__) /* * win32: Minimal dlopen, dlsym, dlclose, dlerror compat. */ #define RTLD_LAZY 1 #define RTLD_NOW 2 void *dlopen(const char *fn, int flag); void *dlsym(void *hptr, const char *fname); int dlclose(void *hptr); const char *dlerror(void); #endif /* _WIN32 */ #endif /* !_USUAL_DLFCN_H_ */ skytools-3.2.6/lib/usual/strpool.h0000644000000000000000000000347412166266754014100 0ustar /* * Pool for shared strings. * * Copyright (c) 2010 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** * @file * * Storage for shared strings. * * This provides refcounted searchable string pool for cases * where lot of objects reference same strings. 
*/ #ifndef _USUAL_STRPOOL_H_ #define _USUAL_STRPOOL_H_ #include /** Handle for the pool */ struct StrPool; /** Pooled String */ struct PStr { /** Parent pool */ struct StrPool *pool; /** Reference count */ int refcnt; /** String length */ int len; /** Zero-terminated value */ char str[FLEX_ARRAY]; }; /** Create new pool */ struct StrPool *strpool_create(CxMem *ca); /** Release pool */ void strpool_free(struct StrPool *sp); /** Return either existing or new PStr for given value */ struct PStr *strpool_get(struct StrPool *sp, const char *str, int len); /** Increase reference count for existing PStr */ void strpool_incref(struct PStr *str); /** Decrease reference count for existing PStr */ void strpool_decref(struct PStr *str); /** Return count of strings in the pool */ int strpool_total(struct StrPool *sp); #endif skytools-3.2.6/lib/usual/fileutil.c0000644000000000000000000000615212166266754014202 0ustar /* * File access utils. * * Copyright (c) 2007-2009 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #ifdef HAVE_SYS_MMAN_H #include #endif #include #include #include /* * Load text file into C string. 
*/ void *load_file(const char *fn, size_t *len_p) { struct stat st; char *buf = NULL; int res; FILE *f; res = stat(fn, &st); if (res < 0) return NULL; buf = malloc(st.st_size + 1); if (!buf) return NULL; f = fopen(fn, "r"); if (!f) { free(buf); return NULL; } if ((res = fread(buf, 1, st.st_size, f)) < 0) { free(buf); fclose(f); return NULL; } fclose(f); buf[res] = 0; if (len_p) *len_p = res; return buf; } /* * Read file line-by-line, call user func on each. */ bool foreach_line(const char *fn, procline_cb proc_line, void *arg) { char *ln = NULL; size_t len = 0; ssize_t res; FILE *f = fopen(fn, "rb"); bool ok = false; if (!f) return false; while (1) { res = getline(&ln, &len, f); if (res < 0) { if (feof(f)) ok = true; break; } if (!proc_line(arg, ln, res)) break; } fclose(f); free(ln); return ok; } /* * Find file size. */ ssize_t file_size(const char *fn) { struct stat st; if (stat(fn, &st) < 0) return -1; return st.st_size; } /* * Map a file into mem. */ #ifdef HAVE_MMAP int map_file(struct MappedFile *m, const char *fname, int rw) { struct stat st; m->fd = open(fname, rw ? O_RDWR : O_RDONLY); if (m->fd < 0) return -1; if (fstat(m->fd, &st) < 0) { close(m->fd); return -1; } m->len = st.st_size; m->ptr = mmap(NULL, m->len, PROT_READ | (rw ? PROT_WRITE : 0), MAP_SHARED, m->fd, 0); if (m->ptr == MAP_FAILED) { close(m->fd); return -1; } return 0; } void unmap_file(struct MappedFile *m) { munmap(m->ptr, m->len); close(m->fd); m->ptr = NULL; m->fd = 0; } #endif #ifndef HAVE_GETLINE /* * Read line from FILE with dynamic allocation. */ int getline(char **line_p, size_t *size_p, void *_f) { FILE *f = _f; char *p; int len = 0; if (!*line_p || *size_p < 128) { p = realloc(*line_p, 512); if (!p) return -1; *line_p = p; *size_p = 512; } while (1) { p = fgets(*line_p + len, *size_p - len, f); if (!p) return len ? 
len : -1; len += strlen(p); if ((*line_p)[len - 1] == '\n') return len; p = realloc(*line_p, *size_p * 2); if (!p) return -1; *line_p = p; *size_p *= 2; } } #endif skytools-3.2.6/lib/usual/time.h0000644000000000000000000000474012166266754013331 0ustar /* * Theme include for time. * * Copyright (c) 2007-2009 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** * @file * * Time-related functionality. */ #ifndef _USUAL_TIME_H_ #define _USUAL_TIME_H_ #include #ifdef HAVE_SYS_TIME_H #include #endif #ifdef _WIN32 #include #endif #include /** Type to hold microseconds. */ typedef uint64_t usec_t; /** How many microseconds in a second. */ #define USEC ((usec_t)1000000) /** Convert usec timestamp to ISO timestamp with millisecond precision: YYYY-mm-dd hh:mm:ss.SSS */ char *format_time_ms(usec_t time, char *dst, unsigned dstlen); /** Convert usec timestamp to ISO timestamp with second precision: YYYY-mm-dd hh:mm:ss */ char *format_time_s(usec_t time, char *dst, unsigned dstlen); /** Query system time */ usec_t get_time_usec(void); /** Query cached system time */ usec_t get_cached_time(void); /** Forget cached system time, next call will fill it. 
*/ void reset_time_cache(void); #ifdef WIN32 #ifndef HAVE_GETTIMEOFDAY #define gettimeofday(t,z) usual_gettimeofday(t,z) /** Compat: gettimeofday() */ int gettimeofday(struct timeval * tp, void * tzp); #endif #ifndef HAVE_LOCALTIME_R #define localtime_r(t,b) usual_localtime_r(t,b) /** Compat: localtime_r() */ struct tm *localtime_r(const time_t *tp, struct tm *buf); #endif #ifndef HAVE_USLEEP #define usleep(x) usual_usleep(x) /** Compat: usleep() */ static inline void usleep(long usec) { Sleep(usec / 1000); } #endif #ifndef HAVE_GETRUSAGE #define getrusage(w,d) usual_getrusage(w,d) #define RUSAGE_SELF 0 /** Compat: rusage for win32 */ struct rusage { struct timeval ru_utime; struct timeval ru_stime; }; /** Compat: getrusage() for win32 */ int getrusage(int who, struct rusage *dst); #endif #endif #endif skytools-3.2.6/lib/usual/regex.h0000644000000000000000000001533012166266754013502 0ustar /* * Copyright (c) 2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** * @file * * POSIX regular expession API, provided by either libc or internally. * * The internal regex engine is only activated if OS does not provide * @ref uregex_links "" (eg. Windows) or if * --with-internal-regex is used when configuring @ref libusual. * * @section uregex Features of internal regex (uregex). 
* * Simple recursive matcher, only features are small size * and POSIX compatibility. Supports both Extended Regular Expressions (ERE) * and Basic Regular Expressions (BRE). * * @section uregex_syntax Supported syntax * @code * Both: . * ^ $ [] [[:cname:]] * ERE: () {} | + ? * BRE: \(\) \{\} \1-9 * @endcode * * With REG_RELAXED_SYNTAX, following common escapes will be available: * @code * Both: \b\B\d\D\s\S\w\W * BRE: \| * ERE: \1-9 * @endcode * * With REG_RELAXED_MATCHING it returns the first match found after applying * leftmost-longest to all elements. It skips the combinatorics to turn it * into guaranteed-longest match. * * @section uregex_skip Skipped POSIX features * - collation classes: [[. .]] * - equivalence classes: [[= =]] * - char ranges by locale order: [a-z] (byte order will be used) * - multi-byte chars: UTF-8 * * @section uregex_globaldefs Global defines * - USUAL_RELAXED_REGEX * - USE_INTERNAL_REGEX * * @section uregex_links Compatibility * * - * POSIX-2008 spec - by default uRegex run in mode where only * features specified by POSIX are available. * * - * AT\&T Research regex(3) regression tests - uRegex follows the interpretation * given there and fully passes the testsuite. 
*/ #ifndef _USUAL_REGEX_H_ #define _USUAL_REGEX_H_ #include #if !defined(USE_INTERNAL_REGEX) && defined(HAVE_REGEX_H) && defined(HAVE_REGCOMP) #define USE_SYSTEM_REGEX #endif #ifdef USE_SYSTEM_REGEX #include #else /* * uRegex defines */ /** * @name Standard flags to regcomp() * @{ */ /** Use POSIX Extended Regex Syntax instead of Basic Syntax */ #define REG_EXTENDED (1 << 0) /** Do case-insensitive matching */ #define REG_ICASE (1 << 1) /** Do case-insensitive matching */ #define REG_NOSUB (1 << 2) /** Do case-insensitive matching */ #define REG_NEWLINE (1 << 3) /* @} */ /** * @name Standard flags to regexec() * @{ */ /** The start of string is not beginning of line, so ^ should not match */ #define REG_NOTBOL (1 << 4) /** The end of string is not end of line, so $ should not match */ #define REG_NOTEOL (1 << 5) /* @} */ /** * @name Standard error codes * @{ */ /** Match not found */ #define REG_NOMATCH 1 /** Bad {} repeat specification */ #define REG_BADBR 2 /** General problem with regular expression */ #define REG_BADPAT 3 /** Repeat used without preceding non-repeat element */ #define REG_BADRPT 4 /** Syntax error with {} */ #define REG_EBRACE 5 /** Syntax error with [] */ #define REG_EBRACK 6 /** Bad collation reference */ #define REG_ECOLLATE 7 /** Bad character class reference */ #define REG_ECTYPE 8 /** Trailing backslack */ #define REG_EESCAPE 9 /** Syntax error with () */ #define REG_EPAREN 10 /** Bad endpoint in range */ #define REG_ERANGE 11 /** No memory */ #define REG_ESPACE 12 /** Bad subgroup reference */ #define REG_ESUBREG 13 /* @} */ /** * @name Other defines * @{ */ #undef RE_DUP_MAX /** Max count user can enter via {} */ #define RE_DUP_MAX 0x7ffe /* @} */ /** * @name Non-standard flags for regcomp() * @{ */ /** * Allow few common non-standard escapes: * @code * \b - word-change * \B - not word change * \d - digit * \D - non-digit * \s - space * \S - non-space * \w - word char * \W - non-word char * \/ - / * @endcode */ #define 
REG_RELAXED_SYNTAX (1 << 14) /** * Dont permute groups in attempt to get longest match. * * May give minor speed win at the expense of strict * POSIX compatibility. */ #define REG_RELAXED_MATCHING (1 << 15) /** Turn on both REG_RELAXED_SYNTAX and REG_RELAXED_MATCHING */ #define REG_RELAXED (REG_RELAXED_SYNTAX | REG_RELAXED_MATCHING) /* @} */ /* turn them permanently on */ #ifdef USUAL_RELAXED_REGEX #undef REG_EXTENDED #define REG_EXTENDED (1 | REG_RELAXED) #endif /** * Compiled regex. * * It has only one standard field - re_nsub, * rest are implementation-specific. */ typedef struct { /** Number of subgroups in expression */ int re_nsub; void *internal; } regex_t; /** Type for offset in match */ typedef long regoff_t; /** Match location */ typedef struct { regoff_t rm_so; /**< Start offset */ regoff_t rm_eo; /**< End offset */ } regmatch_t; /* avoid name conflicts */ #define regcomp(a,b,c) usual_regcomp(a,b,c) #define regexec(a,b,c,d,e) usual_regexec(a,b,c,d,e) #define regerror(a,b,c,d) usual_regerror(a,b,c,d) #define regfree(a) usual_regfree(a) /** * Compile regex. * * @param rx Pre-allocated @ref regex_t structure to fill. * @param re Regex as zero-terminated string. * @param flags See above for regcomp() flags. */ int regcomp(regex_t *rx, const char *re, int flags); /** * Execute regex on a string. * * @param rx Regex previously initialized with regcomp() * @param str Zero-terminated string to match * @param nmatch Number of matches in pmatch * @param pmatch Array of matches. * @param eflags Execution flags. Supported flags: @ref REG_NOTBOL, @ref REG_NOTEOL */ int regexec(const regex_t *rx, const char *str, size_t nmatch, regmatch_t pmatch[], int eflags); /** * Give error description. 
* * @param err Error code returned by regcomp() or regexec() * @param rx Regex structure used in regcomp() or regexec() * @param dst Destination buffer * @param dstlen Size of dst */ size_t regerror(int err, const regex_t *rx, char *dst, size_t dstlen); /** * Free resources allocated by regcomp(). * @param rx Regex previously filled by regcomp() */ void regfree(regex_t *rx); #endif /* !USE_SYSTEM_REGEX */ #endif /* _USUAL_REGEX_H_ */ skytools-3.2.6/lib/usual/dlfcn.c0000644000000000000000000000240612166266754013451 0ustar /* * Dynamic library loading. * * Copyright (c) 2007-2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifdef _WIN32 #include /* * win32: Minimal dlopen, dlsym, dlclose, dlerror compat. */ void *dlopen(const char *fn, int flag) { HMODULE h = LoadLibraryEx(fn, NULL, 0); return h; } void *dlsym(void *hptr, const char *fname) { HMODULE h = hptr; FARPROC f = GetProcAddress(h, fname); return f; } int dlclose(void *hptr) { HMODULE h = hptr; return FreeLibrary(h) ? 0 : -1; } const char *dlerror(void) { return strerror(GetLastError()); } #endif skytools-3.2.6/lib/usual/socket_pton.c0000644000000000000000000001224012166266754014710 0ustar /* $OpenBSD: inet_pton.c,v 1.8 2010/05/06 15:47:14 claudio Exp $ */ /* Copyright (c) 1996 by Internet Software Consortium. 
* * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS * ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL INTERNET SOFTWARE * CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS * SOFTWARE. */ #include #include #ifndef HAVE_INET_PTON #ifndef INADDRSZ #define INADDRSZ 4 #endif #ifndef IN6ADDRSZ #define IN6ADDRSZ 16 #endif #ifndef INT16SZ #define INT16SZ 2 #endif #define u_char uint8_t #define u_int unsigned int /* * WARNING: Don't even consider trying to compile this on a system where * sizeof(int) < 4. sizeof(int) > 4 is fine; all the world's not a VAX. */ static int inet_pton4(const char *src, u_char *dst); static int inet_pton6(const char *src, u_char *dst); /* int * inet_pton(af, src, dst) * convert from presentation format (which usually means ASCII printable) * to network format (which is usually some kind of binary format). * return: * 1 if the address was valid for the specified address family * 0 if the address wasn't valid (`dst' is untouched in this case) * -1 if some other error occurred (`dst' is untouched in this case, too) * author: * Paul Vixie, 1996. */ int inet_pton(int af, const char *src, void *dst) { switch (af) { case AF_INET: return (inet_pton4(src, dst)); case AF_INET6: return (inet_pton6(src, dst)); default: errno = EAFNOSUPPORT; return (-1); } /* NOTREACHED */ } /* int * inet_pton4(src, dst) * like inet_aton() but without all the hexadecimal and shorthand. 
* return: * 1 if `src' is a valid dotted quad, else 0. * notice: * does not touch `dst' unless it's returning 1. * author: * Paul Vixie, 1996. */ static int inet_pton4(const char *src, u_char *dst) { static const char digits[] = "0123456789"; int saw_digit, octets, ch; u_char tmp[INADDRSZ], *tp; saw_digit = 0; octets = 0; *(tp = tmp) = 0; while ((ch = *src++) != '\0') { const char *pch; if ((pch = strchr(digits, ch)) != NULL) { u_int new = *tp * 10 + (pch - digits); if (new > 255) return (0); if (! saw_digit) { if (++octets > 4) return (0); saw_digit = 1; } *tp = new; } else if (ch == '.' && saw_digit) { if (octets == 4) return (0); *++tp = 0; saw_digit = 0; } else return (0); } if (octets < 4) return (0); memcpy(dst, tmp, INADDRSZ); return (1); } /* int * inet_pton6(src, dst) * convert presentation level address to network order binary form. * return: * 1 if `src' is a valid [RFC1884 2.2] address, else 0. * notice: * does not touch `dst' unless it's returning 1. * credit: * inspired by Mark Andrews. * author: * Paul Vixie, 1996. */ static int inet_pton6(const char *src, u_char *dst) { static const char xdigits_l[] = "0123456789abcdef", xdigits_u[] = "0123456789ABCDEF"; u_char tmp[IN6ADDRSZ], *tp, *endp, *colonp; const char *xdigits, *curtok; int ch, saw_xdigit, count_xdigit; u_int val; memset((tp = tmp), '\0', IN6ADDRSZ); endp = tp + IN6ADDRSZ; colonp = NULL; /* Leading :: requires some special handling. 
*/ if (*src == ':') if (*++src != ':') return (0); curtok = src; saw_xdigit = count_xdigit = 0; val = 0; while ((ch = *src++) != '\0') { const char *pch; if ((pch = strchr((xdigits = xdigits_l), ch)) == NULL) pch = strchr((xdigits = xdigits_u), ch); if (pch != NULL) { if (count_xdigit >= 4) return (0); val <<= 4; val |= (pch - xdigits); if (val > 0xffff) return (0); saw_xdigit = 1; count_xdigit++; continue; } if (ch == ':') { curtok = src; if (!saw_xdigit) { if (colonp) return (0); colonp = tp; continue; } else if (*src == '\0') { return (0); } if (tp + INT16SZ > endp) return (0); *tp++ = (u_char) (val >> 8) & 0xff; *tp++ = (u_char) val & 0xff; saw_xdigit = 0; count_xdigit = 0; val = 0; continue; } if (ch == '.' && ((tp + INADDRSZ) <= endp) && inet_pton4(curtok, tp) > 0) { tp += INADDRSZ; saw_xdigit = 0; count_xdigit = 0; break; /* '\0' was seen by inet_pton4(). */ } return (0); } if (saw_xdigit) { if (tp + INT16SZ > endp) return (0); *tp++ = (u_char) (val >> 8) & 0xff; *tp++ = (u_char) val & 0xff; } if (colonp != NULL) { /* * Since some memmove()'s erroneously fail to handle * overlapping regions, we'll do the shift by hand. */ const int n = tp - colonp; int i; if (tp == endp) return (0); for (i = 1; i <= n; i++) { endp[- i] = colonp[n - i]; colonp[n - i] = 0; } tp = endp; } if (tp != endp) return (0); memcpy(dst, tmp, IN6ADDRSZ); return (1); } #endif skytools-3.2.6/lib/usual/netdb.c0000644000000000000000000001104612166266754013457 0ustar /* * libusual - Utility library for C * * Copyright (c) 2010 Marko Kreen, Skype Technologies * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include /* is compat function needed? */ #ifndef HAVE_GETADDRINFO_A /* full compat if threads are available */ #ifdef HAVE_PTHREAD #include #include /* * Basic blocking lookup */ static void gaia_lookup(pthread_t origin, struct gaicb *list[], int nitems, struct sigevent *sevp) { struct gaicb *g; int i, res; for (i = 0; i < nitems; i++) { g = list[i]; res = getaddrinfo(g->ar_name, g->ar_service, g->ar_request, &g->ar_result); g->_state = res; } if (!sevp || sevp->sigev_notify == SIGEV_NONE) { /* do nothing */ } else if (sevp->sigev_notify == SIGEV_SIGNAL) { /* send signal */ pthread_kill(origin, sevp->sigev_signo); } else if (sevp->sigev_notify == SIGEV_THREAD) { /* call function */ sevp->sigev_notify_function(sevp->sigev_value); } } /* * Thread to run blocking lookup in */ struct GAIAContext { struct List req_list; pthread_cond_t cond; pthread_mutex_t lock; pthread_t thread; }; struct GAIARequest { struct List node; pthread_t origin; int nitems; struct sigevent sev; struct gaicb *list[FLEX_ARRAY]; }; #define RQ_SIZE(n) (offsetof(struct GAIARequest,list) + (n)*(sizeof(struct gaicb *))) static void gaia_lock_reqs(struct GAIAContext *ctx) { pthread_mutex_lock(&ctx->lock); } static void gaia_unlock_reqs(struct GAIAContext *ctx) { pthread_mutex_unlock(&ctx->lock); } static void *gaia_lookup_thread(void *arg) { struct GAIAContext *ctx = arg; struct GAIARequest *rq; struct List *el; gaia_lock_reqs(ctx); while (1) { el = list_pop(&ctx->req_list); if (!el) { pthread_cond_wait(&ctx->cond, &ctx->lock); continue; } gaia_unlock_reqs(ctx); rq = container_of(el, struct GAIARequest, node); gaia_lookup(rq->origin, rq->list, rq->nitems, 
&rq->sev); free(rq); gaia_lock_reqs(ctx); } return NULL; } /* * Functions run in user thread */ static int gaia_post_request(struct GAIAContext *ctx, struct gaicb *list[], int nitems, struct sigevent *sevp) { struct GAIARequest *rq; rq = malloc(RQ_SIZE(nitems)); if (!rq) return EAI_MEMORY; list_init(&rq->node); rq->origin = pthread_self(); rq->nitems = nitems; if (sevp) rq->sev = *sevp; else rq->sev.sigev_notify = SIGEV_NONE; memcpy(rq->list, list, sizeof(struct gaicb *)); gaia_lock_reqs(ctx); list_append(&ctx->req_list, &rq->node); gaia_unlock_reqs(ctx); pthread_cond_signal(&ctx->cond); return 0; } static struct GAIAContext *gaia_create_context(void) { struct GAIAContext *ctx; int err; ctx = malloc(sizeof(*ctx)); if (!ctx) return NULL; list_init(&ctx->req_list); err = pthread_cond_init(&ctx->cond, NULL); if (err) goto failed; err = pthread_mutex_init(&ctx->lock, NULL); if (err) goto failed; err = pthread_create(&ctx->thread, NULL, gaia_lookup_thread, ctx); if (err) goto failed; return ctx; failed: free(ctx); errno = err; return NULL; } /* * Final interface */ int getaddrinfo_a(int mode, struct gaicb *list[], int nitems, struct sigevent *sevp) { static struct GAIAContext *ctx; if (nitems <= 0) return 0; if (sevp && sevp->sigev_notify != SIGEV_NONE && sevp->sigev_notify != SIGEV_SIGNAL && sevp->sigev_notify != SIGEV_THREAD) goto einval; if (mode == GAI_WAIT) { gaia_lookup(pthread_self(), list, nitems, sevp); return 0; } else if (mode == GAI_NOWAIT) { if (!ctx) { ctx = gaia_create_context(); if (!ctx) return EAI_MEMORY; } return gaia_post_request(ctx, list, nitems, sevp); } einval: errno = EINVAL; return EAI_SYSTEM; } #else /* without threads not much to do */ int getaddrinfo_a(int mode, struct gaicb *list[], int nitems, struct sigevent *sevp) { errno = ENOSYS; return EAI_SYSTEM; } #endif /* !HAVE_PTHREAD_H */ #endif /* !HAVE_GETADDRINFO_A */ skytools-3.2.6/lib/usual/misc.h0000644000000000000000000000307712166266754013330 0ustar /* * Copyright (c) 2009 Marko Kreen * 
* Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** @file * Random stuff that does not fit elsewhere. */ #ifndef _USUAL_MISC_H_ #define _USUAL_MISC_H_ #include #ifdef WORDS_BIGENDIAN #define FOURCC(a,b,c,d) \ ( ((unsigned int)(unsigned char)(a) << 24) \ | ((unsigned int)(unsigned char)(b) << 16) \ | ((unsigned int)(unsigned char)(c) << 8) \ | ((unsigned int)(unsigned char)(d))) #else /** Four-byte identifier as integer */ #define FOURCC(a,b,c,d) \ ( ((unsigned int)(unsigned char)(a)) \ | ((unsigned int)(unsigned char)(b) << 8) \ | ((unsigned int)(unsigned char)(c) << 16) \ | ((unsigned int)(unsigned char)(d) << 24)) #endif #if defined(__i386__) || defined(__x86_64__) #define mb() asm volatile("mfence":::"memory") #define rmb() asm volatile("lfence":::"memory") #define wmb() asm volatile("sfence":::"memory") #endif #endif skytools-3.2.6/lib/usual/daemon.h0000644000000000000000000000220512166266754013630 0ustar /** @file * Daemonization & pidfile handling. */ /* * Copyright (c) 2007-2009 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. 
* * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef _USUAL_DAEMON_H_ #define _USUAL_DAEMON_H_ #include /** * Read a pid from pidfile and send a signal to it. */ bool signal_pidfile(const char *pidfile, int sig); /** * Daemonize process and write pidfile. */ void daemonize(const char *pidfile, bool go_background); #endif skytools-3.2.6/lib/usual/utf8.h0000644000000000000000000000345612166266754013264 0ustar /** @file * Low-level UTF8 handling. */ /* * Copyright (c) 2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef _USUAL_UTF8_H_ #define _USUAL_UTF8_H_ #include /** * Parse Unicode codepoint from UTF8 stream. * * On invalid UTF8 sequence returns negative byte value and * inreases src_p by one. * * @param src_p Location of data pointer. Will be incremented in-place. * @param srcend Pointer to end of data. 
* @return UNOCODE codepoint or negative byte value on error. */ int utf8_get_char(const char **src_p, const char *srcend); /** * Write Unicode codepoint as UTF8 sequence. * * Skips invalid Unicode values without error. * * @param c Unicode codepoint. * @param dst_p Location of dest pointer, will be increased in-place. * @param dstend Pointer to end of buffer. * @return false if not room, true otherwise. */ bool utf8_put_char(unsigned int c, char **dst_p, const char *dstend); /** Return UTF8 seq length based on unicode codepoint */ int utf8_char_size(unsigned int c); /** Return UTF8 seq length based on first byte */ int utf8_seq_size(unsigned char c); #endif skytools-3.2.6/lib/usual/aatree.h0000644000000000000000000000533512166266754013635 0ustar /* * Copyright (c) 2007-2009 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** @file * * AA-Tree - Binary tree with embeddable nodes. * * AA-Tree (Arne Andersson tree) is a simplified Red-Black tree. 
*/ #ifndef _USUAL_AATREE_H_ #define _USUAL_AATREE_H_ #include struct AATree; struct AANode; /** Callback for node comparision against value */ typedef int (*aatree_cmp_f)(uintptr_t, struct AANode *node); /** Callback for walking the tree */ typedef void (*aatree_walker_f)(struct AANode *n, void *arg); /** * Tree header, for storing helper functions. */ struct AATree { struct AANode *root; int count; aatree_cmp_f node_cmp; aatree_walker_f release_cb; }; /** * Tree node. Embeddable, parent structure should be taken * with container_of(). * * Techinally, the full level is not needed and 2-lowest * bits of either ->left or ->right would be enough * to keep track of structure. Currently this is not * done to keep code simple. */ struct AANode { struct AANode *left; /**< smaller values */ struct AANode *right; /**< larger values */ int level; /**< number of black nodes to leaf */ }; /** * Walk order types. */ enum AATreeWalkType { AA_WALK_IN_ORDER = 0, /* left->self->right */ AA_WALK_PRE_ORDER = 1, /* self->left->right */ AA_WALK_POST_ORDER = 2, /* left->right->self */ }; /** Initialize structure */ void aatree_init(struct AATree *tree, aatree_cmp_f cmpfn, aatree_walker_f release_cb); /** Search for node */ struct AANode *aatree_search(struct AATree *tree, uintptr_t value); /** Insert new node */ void aatree_insert(struct AATree *tree, uintptr_t value, struct AANode *node); /** Remote node */ void aatree_remove(struct AATree *tree, uintptr_t value); /** Walk over all nodes */ void aatree_walk(struct AATree *tree, enum AATreeWalkType wtype, aatree_walker_f walker, void *arg); /** Free */ void aatree_destroy(struct AATree *tree); /** Check if terminal node. */ static inline int aatree_is_nil_node(const struct AANode *node) { return (node->left == node); } #endif skytools-3.2.6/lib/usual/mbuf.h0000644000000000000000000001726512166266754013332 0ustar /** \file * Safe and easy access to memory buffer. 
*/ #ifndef _USUAL_MBUF_H_ #define _USUAL_MBUF_H_ #include #include /** MBuf structure. Allocated by user, can be in stack. */ struct MBuf { uint8_t *data; unsigned read_pos; unsigned write_pos; unsigned alloc_len; bool reader; bool fixed; }; /** Format fragment for *printf() */ #define MBUF_FMT ".*s" /** Argument layout for *printf() */ #define MBUF_ARG(m) ((m) ? mbuf_written(m) : 6), ((m) ? (const char *)mbuf_data(m) : "(null)") /* * Init functions */ /** Initialize R/O buffer to fixed memory area. */ static inline void mbuf_init_fixed_reader(struct MBuf *buf, const void *ptr, unsigned len) { buf->data = (uint8_t *)ptr; buf->read_pos = 0; buf->write_pos = len; buf->alloc_len = len; buf->reader = true; buf->fixed = true; } /** Initialize R/W buffer to fixed memory area. */ static inline void mbuf_init_fixed_writer(struct MBuf *buf, void *ptr, unsigned len) { buf->data = (uint8_t *)ptr; buf->read_pos = 0; buf->write_pos = 0; buf->alloc_len = len; buf->reader = false; buf->fixed = true; } /** Initialize R/W buffer to dynamically allocated memory area. */ static inline void mbuf_init_dynamic(struct MBuf *buf) { buf->data = NULL; buf->read_pos = 0; buf->write_pos = 0; buf->alloc_len = 0; buf->reader = false; buf->fixed = false; } /** Free dynamically allocated area, if exists. */ static inline void mbuf_free(struct MBuf *buf) { if (buf->data) { if (!buf->fixed) free(buf->data); memset(buf, 0, sizeof(*buf)); } } /* * Reset functions. */ /** Move read cursor to start of buffer. */ static inline void mbuf_rewind_reader(struct MBuf *buf) { buf->read_pos = 0; } /** Move both read and write cursor to start of buffer. */ static inline void mbuf_rewind_writer(struct MBuf *buf) { if (!buf->reader) { buf->read_pos = 0; buf->write_pos = 0; } } /* * Info functions. */ /** How many bytes can be read with read cursor. 
*/ static inline unsigned mbuf_avail_for_read(const struct MBuf *buf) { return buf->write_pos - buf->read_pos; } /** How many bytes can be written with write cursor, without realloc. */ static inline unsigned mbuf_avail_for_write(const struct MBuf *buf) { if (!buf->reader && buf->alloc_len > buf->write_pos) return buf->alloc_len - buf->write_pos; return 0; } /** How many data bytes are in buffer. */ static inline unsigned mbuf_written(const struct MBuf *buf) { return buf->write_pos; } /** How many bytes have been read from buffer */ static inline unsigned mbuf_consumed(const struct MBuf *buf) { return buf->read_pos; } /** Return pointer to data area. */ static inline const void *mbuf_data(const struct MBuf *buf) { return buf->data; } /** Do the mbufs contain same data. */ static inline bool mbuf_eq(const struct MBuf *buf1, const struct MBuf *buf2) { if (buf1 == buf2) return true; if (!buf1 || !buf2 || (mbuf_written(buf1) != mbuf_written(buf2))) return false; return memcmp(mbuf_data(buf1), mbuf_data(buf2), mbuf_written(buf1)) == 0; } /** Complare mbuf to asciiz string */ static inline bool mbuf_eq_str(const struct MBuf *buf1, const char *s) { struct MBuf tmp; mbuf_init_fixed_reader(&tmp, s, strlen(s)); return mbuf_eq(buf1, &tmp); } /* * Read functions. */ /** Read a byte from read cursor. */ _MUSTCHECK static inline bool mbuf_get_byte(struct MBuf *buf, uint8_t *dst_p) { if (buf->read_pos + 1 > buf->write_pos) return false; *dst_p = buf->data[buf->read_pos++]; return true; } /** Read big-endian uint16 from read cursor. 
*/ _MUSTCHECK static inline bool mbuf_get_char(struct MBuf *buf, char *dst_p) { if (buf->read_pos + 1 > buf->write_pos) return false; *dst_p = buf->data[buf->read_pos++]; return true; } _MUSTCHECK static inline bool mbuf_get_uint16be(struct MBuf *buf, uint16_t *dst_p) { unsigned a, b; if (buf->read_pos + 2 > buf->write_pos) return false; a = buf->data[buf->read_pos++]; b = buf->data[buf->read_pos++]; *dst_p = (a << 8) | b; return true; } /** Read big-endian uint32 from read cursor. */ _MUSTCHECK static inline bool mbuf_get_uint32be(struct MBuf *buf, uint32_t *dst_p) { unsigned a, b, c, d; if (buf->read_pos + 4 > buf->write_pos) return false; a = buf->data[buf->read_pos++]; b = buf->data[buf->read_pos++]; c = buf->data[buf->read_pos++]; d = buf->data[buf->read_pos++]; *dst_p = (a << 24) | (b << 16) | (c << 8) | d; return true; } /** Get reference to len bytes from read cursor. */ _MUSTCHECK static inline bool mbuf_get_uint64be(struct MBuf *buf, uint64_t *dst_p) { uint32_t a, b; if (!mbuf_get_uint32be(buf, &a) || !mbuf_get_uint32be(buf, &b)) return false; *dst_p = ((uint64_t)a << 32) | b; return true; } _MUSTCHECK static inline bool mbuf_get_bytes(struct MBuf *buf, unsigned len, const uint8_t **dst_p) { if (buf->read_pos + len > buf->write_pos) return false; *dst_p = buf->data + buf->read_pos; buf->read_pos += len; return true; } /** Get reference to asciiz string from read cursor. */ _MUSTCHECK static inline bool mbuf_get_chars(struct MBuf *buf, unsigned len, const char **dst_p) { if (buf->read_pos + len > buf->write_pos) return false; *dst_p = (char *)buf->data + buf->read_pos; buf->read_pos += len; return true; } _MUSTCHECK static inline bool mbuf_get_string(struct MBuf *buf, const char **dst_p) { const char *res = (char *)buf->data + buf->read_pos; const uint8_t *nul = memchr(res, 0, mbuf_avail_for_read(buf)); if (!nul) return false; *dst_p = res; buf->read_pos = nul + 1 - buf->data; return true; } /* * Write functions. 
*/ /** Allocate more room if needed and the mbuf allows. */ _MUSTCHECK bool mbuf_make_room(struct MBuf *buf, unsigned len); /** Write a byte to write cursor. */ _MUSTCHECK static inline bool mbuf_write_byte(struct MBuf *buf, uint8_t val) { if (buf->write_pos + 1 > buf->alloc_len && !mbuf_make_room(buf, 1)) return false; buf->data[buf->write_pos++] = val; return true; } /** Write len bytes to write cursor. */ _MUSTCHECK static inline bool mbuf_write(struct MBuf *buf, const void *ptr, unsigned len) { if (buf->write_pos + len > buf->alloc_len && !mbuf_make_room(buf, len)) return false; memcpy(buf->data + buf->write_pos, ptr, len); buf->write_pos += len; return true; } /** writes full contents of another mbuf, without touching it */ _MUSTCHECK static inline bool mbuf_write_raw_mbuf(struct MBuf *dst, struct MBuf *src) { return mbuf_write(dst, src->data, src->write_pos); } /** writes partial contents of another mbuf, with touching it */ _MUSTCHECK static inline bool mbuf_write_mbuf(struct MBuf *dst, struct MBuf *src, unsigned len) { const uint8_t *data; if (!mbuf_get_bytes(src, len, &data)) return false; if (!mbuf_write(dst, data, len)) { src->read_pos -= len; return false; } return true; } /** Fiil mbuf with byte value */ _MUSTCHECK static inline bool mbuf_fill(struct MBuf *buf, uint8_t byte, unsigned len) { if (buf->write_pos + len > buf->alloc_len && !mbuf_make_room(buf, len)) return false; memset(buf->data + buf->write_pos, byte, len); buf->write_pos += len; return true; } /** remove some data from mbuf */ _MUSTCHECK static inline bool mbuf_cut(struct MBuf *buf, unsigned ofs, unsigned len) { if (buf->reader) return false; if (ofs + len < buf->write_pos) { unsigned endofs = ofs + len; memmove(buf->data + ofs, buf->data + endofs, buf->write_pos - endofs); buf->write_pos -= len; } else if (ofs < buf->write_pos) { buf->write_pos = ofs; } return true; } static inline void mbuf_copy(const struct MBuf *src, struct MBuf *dst) { *dst = *src; } _MUSTCHECK static inline bool 
mbuf_slice(struct MBuf *src, unsigned len, struct MBuf *dst) { if (len > mbuf_avail_for_read(src)) return false; mbuf_init_fixed_reader(dst, src->data + src->read_pos, len); src->read_pos += len; return true; } #endif skytools-3.2.6/lib/usual/regex.c0000644000000000000000000006724512166266754013511 0ustar /* * Small POSIX-only regex engine. * * Copyright (c) 2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* * Simple recursive matcher, only features are small size * and POSIX compatibility. * * ERE syntax: . * ^ $ [] [[:cname:]] () {} | + ? * BRE syntax: . * ^ $ [] [[:cname:]] \(\) \{\} \1-9 * * With REG_RELAXED_SYNTAX, following common escapes will be available: * \b\B\d\D\s\S\w\W BRE: \| ERE: \1-9 * * With REG_RELAXED_MATCHING it returns the first match found after applying * leftmost-longest to all elements. It skips the combinatorics to turn it * into guaranteed-longest match. * * Skipped POSIX features: * - collation classes: [[. 
.]] * - equivalence classes: [[= =]] * - char ranges by locale order: [a-z] (byte order will be used) * - multi-byte chars: UTF-8 */ #include #ifndef USE_SYSTEM_REGEX #include #include #include #include #undef STRICT /* either dynamic or static decision */ #define STRICT (ctx->strict) /* how many regmatch_t can be reported */ #define MAX_GROUPS 128 /* max count we want to store, means 'infinite' for simple atoms */ #define MAX_COUNT 0x7fff /* max count for simple atoms: char, any or class */ #define SIMPLE_MAXCNT(op) (((op)->maxcnt == MAX_COUNT) ? 0x7FFFFFFF : (op)->maxcnt) #define is_word(c) (isalnum(c) || (c) == '_') struct Op; struct ExecCtx; struct GMatch; /* Operation type */ enum OpType { /* ops that take count */ OP_CHAR, OP_ANY, OP_CLASS, OP_GROUP, OP_BREF, /* ops that dont take count */ OP_BOL, OP_EOL, OP_WCHANGE, OP_NWCHANGE, OP_GMATCH, OP_FULLMATCH, }; #define NONCOUNT_OPS_START OP_BOL /* regex_t->internal */ struct RegexInt { struct Op *root; struct Op *glist; struct MemPool *pool; int flags; }; /* match function and its setter */ typedef int (*matcher_f)(struct ExecCtx *ctx, const struct Op *op, const char *str, struct GMatch *gm); static void set_op_type(struct Op *op, enum OpType op_type); /* List of tokens to be AND-ed together */ struct AndList { struct AndList *next; struct Op *op_list; }; /* extra data for group Op */ struct GroupData { struct Op *parent; /* parent group or NULL for first group */ struct AndList *or_list;/* alternative AndLists */ struct Op *glist_prev; /* prev group Op */ bool has_refs; /* if bref references it */ }; /* char class data */ struct ClassData { uint32_t bitmap[256 / 32]; }; /* operation data */ struct Op { struct Op *next; matcher_f matcher; uint16_t mincnt; uint16_t maxcnt; uint8_t type; union { uint8_t grp_no; /* OP_GROUP: group nr, 0-toplevel */ char lit; /* OP_CHAR */ uint8_t bref; /* OP_BREF */ }; union { struct ClassData cdata; struct GroupData gdata; }; }; #define OP_BASE (offsetof(struct Op, cdata)) /* * 
Operations on ClassData */ static bool class_isset(const struct ClassData *cd, unsigned char c) { return cd->bitmap[c / 32] & (1 << (c % 32)); } static void class_set(struct ClassData *cd, unsigned char c) { cd->bitmap[c / 32] |= (1 << (c % 32)); } static void class_negate(struct ClassData *cd) { int i; class_set(cd, 0); for (i = 0; i < 256/32; i++) cd->bitmap[i] ^= -1; } /* * Parsing code */ /* top-level context */ struct ParseCtx { regex_t *rx; struct RegexInt *rxi; struct Op *last_group; struct AndList *last_andlist; struct Op *last_elem; /* last op in current OR branch */ bool gotcnt; /* count was attached to last op */ bool strict; /* strict syntax */ }; static struct AndList *new_andlist(struct ParseCtx *ctx, struct Op *g) { struct AndList *al = mempool_alloc(&ctx->rxi->pool, sizeof(*al)); if (!al) return NULL; if (ctx->last_andlist) { ctx->last_andlist->next = al; } else { g->gdata.or_list = al; } ctx->last_andlist = al; return al; } static struct Op *new_op(struct ParseCtx *ctx, enum OpType t, int extra) { struct Op *op = mempool_alloc(&ctx->rxi->pool, OP_BASE + extra); if (!op) return NULL; set_op_type(op, t); op->mincnt = op->maxcnt = 1; ctx->gotcnt = false; /* append */ if (ctx->last_elem) { ctx->last_elem->next = op; } else if (ctx->last_andlist) { ctx->last_andlist->op_list = op; } else if (ctx->last_group) { struct AndList *alist; alist = new_andlist(ctx, ctx->last_group); if (!alist) return NULL; alist->op_list = op; } ctx->last_elem = op; if (t == OP_GROUP) { struct Op *parent = ctx->last_group; int gno = ++ctx->rx->re_nsub; op->grp_no = gno; op->gdata.parent = parent; op->gdata.glist_prev = ctx->rxi->glist; ctx->rxi->glist = op; ctx->last_group = op; ctx->last_andlist = NULL; ctx->last_elem = NULL; if (!ctx->rxi->root) ctx->rxi->root = op; } return op; } static int op_char(struct ParseCtx *ctx, unsigned c) { struct Op *op = new_op(ctx, OP_CHAR, 0); if (!op) return REG_ESPACE; op->lit = c; if ((ctx->rxi->flags & REG_ICASE) && isalpha(c)) op->lit = 
tolower(c); return 0; } static int op_bref(struct ParseCtx *ctx, unsigned c) { struct Op *g, *op; op = new_op(ctx, OP_BREF, 0); if (!op) return REG_ESPACE; op->bref = c - '0'; /* check if valid ref */ for (g = ctx->last_group; g; g = g->gdata.parent) { if (g->grp_no == op->bref) return REG_ESUBREG; } /* tag the group as referenced */ for (g = ctx->rxi->glist; g; g = g->gdata.glist_prev) { if (g->grp_no == op->bref) { g->gdata.has_refs = true; return 0; } } return REG_ESUBREG; } static int op_simple(struct ParseCtx *ctx, enum OpType t) { struct Op *op = new_op(ctx, t, 0); if (!op) return REG_ESPACE; return 0; } static int op_count_simple(struct ParseCtx *ctx, int min, int max) { struct Op *op = ctx->last_elem; if (!op || ctx->gotcnt) return REG_BADRPT; if (op->type >= NONCOUNT_OPS_START) return REG_BADRPT; ctx->gotcnt = true; op->mincnt = min; op->maxcnt = max; return 0; } static int op_count_full(struct ParseCtx *ctx, const char **re) { unsigned a, b; char *end = (char *)*re; bool ext = ctx->rxi->flags & REG_EXTENDED; int err; /* apply sanity check */ err = op_count_simple(ctx, 1, 1); if (err) return err; /* parse */ a = b = strtoul(*re, &end, 10); if (end == *re) return REG_EBRACE; if (*end == ',') { *re = end + 1; end = (char*)*re; b = strtoul(*re, &end, 10); if (end == *re) b = MAX_COUNT; } if (a > b || b > MAX_COUNT || a >= MAX_COUNT) return REG_BADBR; /* check for correct termination */ if (ext && end[0] == '}') { *re = end + 1; goto done; } else if (!ext && end[0] == '\\' && end[1] == '}') { *re = end + 2; goto done; } /* bad fmt, decide between error codes */ for (a = 0; end[a] && a < 5; a++) { if (end[a] == '}') return REG_BADBR; } return REG_EBRACE; done: ctx->last_elem->mincnt = a; ctx->last_elem->maxcnt = b; return 0; } static int op_gstart(struct ParseCtx *ctx) { struct Op *op; op = new_op(ctx, OP_GROUP, sizeof(struct GroupData)); if (!op) return REG_ESPACE; if (op->grp_no >= MAX_GROUPS) return REG_BADPAT; return 0; } static int finish_branch(struct 
ParseCtx *ctx) { int err; /* disallow empty OR fragments, but not empty groups */ if (!ctx->last_elem && ctx->last_andlist && STRICT) return REG_BADPAT; if (ctx->last_group->gdata.parent) err = op_simple(ctx, OP_GMATCH); else err = op_simple(ctx, OP_FULLMATCH); if (err) return err; ctx->last_elem = NULL; return 0; } static int op_gend(struct ParseCtx *ctx) { struct Op *op = ctx->last_group; struct AndList *alist; int err; if (!op) return REG_EPAREN; err = finish_branch(ctx); if (err) return err; ctx->last_group = op->gdata.parent; ctx->last_elem = op; /* recover previous andlist... */ alist = ctx->last_group->gdata.or_list; while (alist && alist->next) alist = alist->next; ctx->last_andlist = alist; return 0; } static int op_or(struct ParseCtx *ctx) { struct Op *gop = ctx->last_group; struct AndList *alist; int err; /* disallow empty OR branches */ if (!ctx->last_elem && STRICT) return REG_BADPAT; /* start new branch */ err = finish_branch(ctx); if (err) return err; alist = new_andlist(ctx, gop); if (!alist) return REG_ESPACE; ctx->last_andlist = alist; ctx->last_elem = NULL; return 0; } /* * Parse bracketed classes. 
*/ static void add_char(struct ClassData *cd, unsigned char c, bool icase) { if (icase && isalpha(c)) { class_set(cd, tolower(c)); class_set(cd, toupper(c)); } else { class_set(cd, c); } } struct NamedClass { const char name[7]; unsigned char name_len; int (*check_func)(int c); }; static const struct NamedClass ctype_list[] = { { "alnum", 5, isalnum }, { "alpha", 5, isalpha }, { "blank", 5, isblank }, { "cntrl", 5, iscntrl }, { "digit", 5, isdigit }, { "graph", 5, isgraph }, { "lower", 5, islower }, { "print", 5, isprint }, { "punct", 5, ispunct }, { "space", 5, isspace }, { "upper", 5, isupper }, { "xdigit", 6, isxdigit }, }; static int fill_class(struct ClassData *cd, const char *name, const char **s_p, bool icase) { unsigned c; const struct NamedClass *cc = ctype_list; for (c = 0; c < ARRAY_NELEM(ctype_list); c++) { cc = ctype_list + c; if (strncmp(name, cc->name, cc->name_len) != 0) continue; name += cc->name_len; if (name[0] == ':' && name[1] == ']') goto found; break; } return *name ? REG_ECTYPE : REG_EBRACK; found: /* fill map */ for (c = 1; c < 256; c++) { if (cc->check_func(c)) add_char(cd, c, icase); } *s_p = name + 2; return 0; } #define MAP_RANGE 0x7FFF0001 #define MAP_END 0x7FFF0002 #define MAP_OTHER 0x7FFF0003 static int get_map_token(struct ParseCtx *ctx, const char **s_p, unsigned *dst_p, bool start, struct ClassData *cd, bool icase) { const char *s = *s_p; unsigned res; if (*s == '-') { if (start || s[1] == ']') res = '-'; else res = MAP_RANGE; s += 1; } else if (*s == ']' && !start) { res = MAP_END; s++; } else if (*s == '[' && (s[1] == '.' 
|| s[1] == ':' || s[1] == '=')) { if (s[1] == ':') { s += 2; *dst_p = MAP_OTHER; return fill_class(cd, s, s_p, icase); } return REG_BADPAT; } else { res = (unsigned char)*s++; } *dst_p = res; *s_p = s; return 0; } static int op_class(struct ParseCtx *ctx, const char **re) { const char *s = *re; struct ClassData *cd; struct Op *op; bool not = false, icase = ctx->rxi->flags & REG_ICASE; const char *start; unsigned tk, c, prevtk = 0; bool is_range = false; int err; if (*s == '^') { s++; not = true; } start = s; op = new_op(ctx, OP_CLASS, sizeof(struct ClassData)); if (!op) return REG_ESPACE; cd = &op->cdata; if (not && (ctx->rxi->flags & REG_NEWLINE)) class_set(cd, '\n'); while (*s) { err = get_map_token(ctx, &s, &tk, s == start, cd, icase); if (err) return err; if (tk == MAP_END) { if (prevtk) add_char(cd, prevtk, icase); goto done; } else if (tk == MAP_OTHER) { if (is_range) return REG_ERANGE; if (prevtk) add_char(cd, prevtk, icase); prevtk = 0; } else if (tk == MAP_RANGE) { if (!prevtk) return REG_ERANGE; is_range = true; } else if (is_range) { if (tk < prevtk) return REG_ERANGE; for (c = prevtk; c <= tk; c++) add_char(cd, c, icase); is_range = false; prevtk = 0; } else { if (prevtk) add_char(cd, prevtk, icase); prevtk = tk; } } return REG_EBRACK; done: *re = s; if (not) class_negate(cd); return 0; } static int op_class_const(struct ParseCtx *ctx, const char *def) { const char *p = def + 1; return op_class(ctx, &p); } /* * Top-level syntax */ static int parse_relaxed_escapes(struct ParseCtx *ctx, char c) { if (STRICT) return REG_BADPAT; switch (c) { case 'b': return op_simple(ctx, OP_WCHANGE); case 'B': return op_simple(ctx, OP_NWCHANGE); case 'w': return op_class_const(ctx, "[_[:alnum:]]"); case 'W': return op_class_const(ctx, "[^_[:alnum:]]"); case 'd': return op_class_const(ctx, "[[:digit:]]"); case 'D': return op_class_const(ctx, "[^[:digit:]]"); case 's': return op_class_const(ctx, "[[:space:]]"); case 'S': return op_class_const(ctx, "[^[:space:]]"); } return 
REG_BADPAT; } static int parse_posix_ext(struct ParseCtx *ctx, const char *re) { int err = 0; unsigned c; int glevel = 0; loop: if (err) return err; c = *re++; switch (c) { case 0: return (glevel == 0) ? 0 : REG_EPAREN; case '(': glevel++; err = op_gstart(ctx); break; case ')': if (glevel > 0) { glevel--; err = op_gend(ctx); } else { err = op_char(ctx, c); /* POSIX bug */ } break; case '|': err = op_or(ctx); break; case '*': err = op_count_simple(ctx, 0, MAX_COUNT); break; case '?': err = op_count_simple(ctx, 0, 1); break; case '+': err = op_count_simple(ctx, 1, MAX_COUNT); break; case '[': err = op_class(ctx, &re); break; case '{': err = op_count_full(ctx, &re); break; case '.': err = op_simple(ctx, OP_ANY); break; case '^': err = op_simple(ctx, OP_BOL); break; case '$': err = op_simple(ctx, OP_EOL); break; case '\\': goto escaped; default: err = op_char(ctx, c); } goto loop; escaped: c = *re++; if (c == 0) err = REG_EESCAPE; else if (c >= '0' && c <= '9') err = STRICT ? REG_BADPAT : op_bref(ctx, c); else if (isalpha(c)) err = parse_relaxed_escapes(ctx, c); else err = op_char(ctx, c); goto loop; } static int parse_posix_basic(struct ParseCtx *ctx, const char *re) { int err = 0; unsigned c; int glevel = 0; loop: if (err) return err; c = *re++; switch (c) { case 0: return (glevel == 0) ? 
0 : REG_EPAREN; case '*': if (ctx->last_elem && ctx->last_elem->type != OP_BOL) err = op_count_simple(ctx, 0, MAX_COUNT); else err = op_char(ctx, '*'); break; case '.': err = op_simple(ctx, OP_ANY); break; case '[': err = op_class(ctx, &re); break; case '^': if (!ctx->last_elem) err = op_simple(ctx, OP_BOL); else err = op_char(ctx, c); break; case '$': if (!*re || (re[0] == '\\' && re[1] == ')')) err = op_simple(ctx, OP_EOL); else err = op_char(ctx, c); break; case '\\': goto escaped; default: err = op_char(ctx, c); } goto loop; escaped: c = *re++; switch (c) { case 0: return REG_EESCAPE; case '(': glevel++; err = op_gstart(ctx); break; case ')': glevel--; if (glevel < 0) return REG_EPAREN; err = op_gend(ctx); break; case '{': err = op_count_full(ctx, &re); break; case '.': case '^': case '$': case '*': case '[': case ']': case '\\': err = op_char(ctx, c); break; case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': err = op_bref(ctx, c); break; case '|': err = STRICT ? REG_BADPAT : op_or(ctx); break; default: err = parse_relaxed_escapes(ctx, c); } goto loop; } /* * Public compiling API. 
*/ int regcomp(regex_t *rx, const char *re, int flags) { struct ParseCtx ctx; struct RegexInt *rxi; int err; struct MemPool *pool = NULL; /* do it first, to allow regfree() */ memset(rx, 0, sizeof(*rx)); if (flags & ~(REG_EXTENDED | REG_ICASE | REG_NOSUB | REG_NEWLINE | REG_RELAXED)) return REG_BADPAT; if (!*re) return REG_BADPAT; rxi = mempool_alloc(&pool, sizeof(*rxi)); if (!rxi) return REG_ESPACE; rx->internal = rxi; rxi->pool = pool; /* initialize rx and local context */ memset(&ctx, 0, sizeof(ctx)); ctx.rx = rx; ctx.rxi = rxi; ctx.strict = !(flags & REG_RELAXED_SYNTAX); rxi->flags = flags; /* setup group #0 */ rx->re_nsub = -1; err = op_gstart(&ctx); if (err) goto failed; /* launch proper parser */ if (flags & REG_EXTENDED) err = parse_posix_ext(&ctx, re); else err = parse_posix_basic(&ctx, re); /* finalize group #0 */ if (!err) err = finish_branch(&ctx); /* relax if details are not needed */ if (flags & REG_NOSUB) { rxi->flags |= REG_RELAXED_MATCHING; rx->re_nsub = 0; } failed: /* clean up if problems */ if (err) regfree(rx); return err; } /* * Matching code */ /* historical best match */ struct HMatch { const char *hist_start; const char *hist_end; int rep_len; /* if repeated seq, full len thus far */ }; /* per-group-match context */ struct GMatch { struct GMatch *parent; /* parent group */ const struct Op *owner; /* Op for this group */ const char *start; /* match start */ const char *end; /* match end, NULL if no match */ struct GMatch *prevgm; /* older stack entry */ struct HMatch hm_next; /* best match for following stack entry */ int count; /* match nr in repeated seq */ }; /* top context */ struct ExecCtx { const regex_t *rx; const struct RegexInt *rxi; const char *str_start; regmatch_t *pmatch; int nmatch; int flags; bool strict; const char *last_endpos; struct HMatch hm_first[MAX_GROUPS]; struct GMatch *gm_stack[MAX_GROUPS]; struct GMatch *gm_cache[MAX_GROUPS]; }; static void push_gm(struct ExecCtx *ctx, struct GMatch *gm) { int gno = 
gm->owner->grp_no; gm->prevgm = ctx->gm_stack[gno]; ctx->gm_stack[gno] = gm; } static void pop_gm(struct ExecCtx *ctx, struct GMatch *gm) { int gno = gm->owner->grp_no; ctx->gm_stack[gno] = gm->prevgm; } static inline int do_match(struct ExecCtx *ctx, const struct Op *op, const char *str, struct GMatch *gm) { return op->matcher(ctx, op, str, gm); } static int scan_next(struct ExecCtx *ctx, const struct Op *op, const char *str, struct GMatch *gm, int curcnt, int alen) { int err = REG_NOMATCH; bool gotmatch = false; if (curcnt == op->mincnt) return do_match(ctx, op->next, str, gm); for (; curcnt >= op->mincnt; curcnt--) { err = do_match(ctx, op->next, str, gm); if (STRICT && err == 0) gotmatch = true; else if (err != REG_NOMATCH) break; str -= alen; } if (err == REG_NOMATCH && gotmatch) err = 0; return err; } static int match_char(struct ExecCtx *ctx, const struct Op *op, const char *str, struct GMatch *gm) { bool icase = (ctx->flags & REG_ICASE); int c, i, maxcnt = SIMPLE_MAXCNT(op); for (i = 0; (i < maxcnt) && str[i]; i++) { c = icase ? 
tolower((unsigned char)str[i]) : str[i]; if (c != op->lit) break; } return scan_next(ctx, op, str + i, gm, i, 1); } static int match_any(struct ExecCtx *ctx, const struct Op *op, const char *str, struct GMatch *gm) { bool nl = (ctx->flags & REG_NEWLINE); int i, maxcnt = SIMPLE_MAXCNT(op); for (i = 0; (i < maxcnt) && str[i]; i++) { if (nl && str[i] == '\n') break; } return scan_next(ctx, op, str + i, gm, i, 1); } static int match_class(struct ExecCtx *ctx, const struct Op *op, const char *str, struct GMatch *gm) { int i, maxcnt = SIMPLE_MAXCNT(op); for (i = 0; (i < maxcnt); i++) { if (!class_isset(&op->cdata, str[i])) break; } return scan_next(ctx, op, str + i, gm, i, 1); } static int match_bol(struct ExecCtx *ctx, const struct Op *op, const char *str, struct GMatch *gm) { if (str == ctx->str_start && !(ctx->flags & REG_NOTBOL)) return do_match(ctx, op->next, str, gm); else if (str != ctx->str_start && str[-1] == '\n' && (ctx->flags & REG_NEWLINE)) return do_match(ctx, op->next, str, gm); return REG_NOMATCH; } static int match_eol(struct ExecCtx *ctx, const struct Op *op, const char *str, struct GMatch *gm) { if (*str == '\n' && (ctx->flags & REG_NEWLINE)) return do_match(ctx, op->next, str, gm); else if (*str == 0 && !(ctx->flags & REG_NOTEOL)) return do_match(ctx, op->next, str, gm); return REG_NOMATCH; } static int match_wchange(struct ExecCtx *ctx, const struct Op *op, const char *str, struct GMatch *gm) { bool prevw = (str == ctx->str_start) ? false : is_word(str[-1]); bool curw = is_word(str[0]); bool ischange = prevw ^ curw; if ((op->type == OP_WCHANGE) ? ischange : !ischange) return do_match(ctx, op->next, str, gm); return REG_NOMATCH; } static int match_bref(struct ExecCtx *ctx, const struct Op *op, const char *str, struct GMatch *gm) { bool icase = ctx->flags & REG_ICASE; int i; struct GMatch *bgm = ctx->gm_stack[op->bref]; int blen = (bgm && bgm->end) ? 
(bgm->end - bgm->start) : -1; /* handle no-match, zero-len, zero-count */ if (blen < 0 && op->mincnt > 0) return REG_NOMATCH; if (blen <= 0 || op->maxcnt == 0) return do_match(ctx, op->next, str, gm); /* find max matches */ for (i = 0; (i < op->maxcnt) && *str; i++) { if (icase && strncasecmp(str, bgm->start, blen) != 0) break; else if (!icase && strncmp(str, bgm->start, blen) != 0) break; str += blen; } return scan_next(ctx, op, str, gm, i, blen); } static int match_group(struct ExecCtx *ctx, const struct Op *op, const char *str, struct GMatch *gm) { int err = REG_NOMATCH; bool gotmatch = false; struct GMatch gthis; /* per-group-match context */ memset(>his, 0, sizeof(gthis)); gthis.owner = op; gthis.start = str; gthis.parent = gm; if (gm && gm->owner == op) { gthis.parent = gm->parent; gthis.count = gm->count + 1; } gm = >his; push_gm(ctx, gm); if (op->maxcnt > 0) { struct AndList *alist = op->gdata.or_list; /* check all branches, unless relaxed matching */ while (alist) { err = do_match(ctx, alist->op_list, str, gm); if (err == 0 && STRICT) { gm->end = NULL; gotmatch = true; } else if (err != REG_NOMATCH) break; alist = alist->next; } } /* is no-match allowed? */ if ((op->mincnt == 0) && (gm->count == 0) && (err == REG_NOMATCH || (err == 0 && STRICT))) { gm->end = NULL; err = do_match(ctx, op->next, str, gm->parent); } pop_gm(ctx, gm); return gotmatch ? 
0 : err; } static int match_gend(struct ExecCtx *ctx, const struct Op *f_op, const char *str, struct GMatch *gm) { int err = REG_NOMATCH; const struct Op *op = gm->owner; bool zeromatch = (str == gm->start); bool gotmatch = false; /* ignore follow-up empty matches, unless it has backrefs */ if (zeromatch && gm->count > 0 && gm->count >= op->mincnt && !gm->owner->gdata.has_refs) return REG_NOMATCH; /* tag as matched */ gm->end = str; /* try more repeats, stop if count full or last match was zero-length */ if (gm->count + 1 < op->maxcnt && !zeromatch) { err = match_group(ctx, op, str, gm); if (err == 0 && STRICT) gotmatch = true; else if (err != REG_NOMATCH) return err; } /* fail if not enough repeats */ if (!zeromatch && gm->count + 1 < op->mincnt) return err; /* continue with parent branch */ err = do_match(ctx, op->next, str, gm->parent); if (err == REG_NOMATCH && gotmatch) err = 0; return err; } /* * The juice of POSIX - match weighting. */ static int gmatch_hist_cmp(struct ExecCtx *ctx, int gno, struct GMatch *gm, int replen) { struct HMatch *hm = (gm->prevgm) ? &gm->prevgm->hm_next : &ctx->hm_first[gno]; int gmlen = (gm->end) ? (gm->end - gm->start) : -1; int hmlen = (hm->hist_end) ? (hm->hist_end - hm->hist_start) : -1; int gmreplen = (gmlen >= 0) ? (gmlen + replen) : replen; int hmreplen = ((hmlen >= 0) ? hmlen : 0) + hm->rep_len; int gmofs = (gm->end) ? (gm->start - ctx->str_start) : -1; int hmofs = (hm->hist_start) ? (hm->hist_start - ctx->str_start) : -1; /* prefer rightmost match, to allow preceding elements match more */ int res = (gmofs - hmofs); /* prefer longer repeated match */ if (res == 0 && gm->count == 0) res = (gmreplen - hmreplen); /* prefer longer single match */ if (res == 0) res = (gmlen - hmlen); return res; } static int cmp_gmatches(struct ExecCtx *ctx, int gno, struct GMatch *gm, int replen) { int cmp = 0, gmlen; if (gm) { /* need to compare preceding groups first */ gmlen = gm->end ? 
gm->end - gm->start : 0; cmp = cmp_gmatches(ctx, gno, gm->prevgm, (gm->count == 0) ? 0 : (replen + gmlen)); /* actual comparision */ if (!cmp) cmp = gmatch_hist_cmp(ctx, gno, gm, replen); } return cmp; } static int gm_resolve_tie(struct ExecCtx *ctx, int gno) { struct GMatch *gm = ctx->gm_stack[gno]; if (!gm) /* 0-count match is better than no match */ return ctx->hm_first[gno].hist_start ? -1 : 0; return cmp_gmatches(ctx, gno, gm, 0); } static void fill_history(struct ExecCtx *ctx, int gno) { struct HMatch *hm; int gmlen, rep_len = 0; struct GMatch *gm = ctx->gm_stack[gno]; while (STRICT && gm) { hm = (gm->prevgm) ? &gm->prevgm->hm_next : &ctx->hm_first[gno]; hm->hist_start = gm->start; hm->hist_end = gm->end; hm->rep_len = rep_len; gmlen = gm->end ? (gm->end - gm->start) : 0; rep_len += gmlen; if (gm->count == 0) rep_len = 0; gm = gm->prevgm; } } static void publish_gm(struct ExecCtx *ctx, int gno) { struct GMatch *gm = ctx->gm_stack[gno]; regmatch_t *rm = ctx->pmatch + gno; /* ignore non-matches */ while (gm && !gm->end) gm = gm->prevgm; /* require it to be inside reported parent */ if (gm && gm->parent) { int pno = gm->parent->owner->grp_no; if (gm->parent != ctx->gm_cache[pno]) gm = NULL; } ctx->gm_cache[gno] = gm; /* publish new match */ if (gm) { rm->rm_so = gm->start - ctx->str_start; rm->rm_eo = gm->end - ctx->str_start; } else { rm->rm_so = -1; rm->rm_eo = -1; } } /* compare and publish */ static int got_full_match(struct ExecCtx *ctx, const struct Op *f_op, const char *str, struct GMatch *gm) { int gno, cmp; /* tag group as matched */ gm->end = str; /* ignore shorter matches */ if (ctx->last_endpos && str < ctx->last_endpos) return 0; /* longer or equal length */ if (str > ctx->last_endpos) { ctx->last_endpos = str; goto better_match; } else if (STRICT && ctx->nmatch > 1) { for (gno = 0; gno < ctx->nmatch; gno++) { cmp = gm_resolve_tie(ctx, gno); if (cmp < 0) break; if (cmp > 0) goto better_match; } } return 0; better_match: for (gno = 0; gno < 
ctx->nmatch; gno++) { publish_gm(ctx, gno); fill_history(ctx, gno); } return 0; } /* fill in proper matcher */ static void set_op_type(struct Op *op, enum OpType op_type) { static const matcher_f mlist[] = { match_char, match_any, match_class, match_group, match_bref, match_bol, match_eol, match_wchange, match_wchange, match_gend, got_full_match }; op->matcher = mlist[op_type]; op->type = op_type; } /* * Public matching API */ int regexec(const regex_t *rx, const char *str, size_t nmatch, regmatch_t pmatch[], int eflags) { int err; struct ExecCtx ctx; if (eflags & ~(REG_NOTBOL | REG_NOTEOL)) return REG_BADPAT; /* init local context */ memset(&ctx, 0, sizeof(ctx)); ctx.pmatch = pmatch; ctx.nmatch = nmatch; ctx.str_start = str; ctx.rx = rx; ctx.rxi = rx->internal; ctx.flags = ctx.rxi->flags | eflags; /* reset pmatch area */ if (!(ctx.flags & REG_NOSUB)) memset(pmatch, -1, nmatch * sizeof(regmatch_t)); /* decide pmatch area that will be used */ if (!pmatch || (ctx.flags & REG_NOSUB)) ctx.nmatch = 0; else if (nmatch > (size_t)rx->re_nsub + 1) ctx.nmatch = rx->re_nsub + 1; ctx.strict = !(ctx.flags & REG_RELAXED_MATCHING) && (ctx.nmatch > 0); /* execute search */ str--; do { str++; err = do_match(&ctx, ctx.rxi->root, str, NULL); } while ((err == REG_NOMATCH) && *str); return err; } /* * Free parse tree */ void regfree(regex_t *rx) { struct RegexInt *rxi; if (rx) { rxi = rx->internal; if (rxi) mempool_destroy(&rxi->pool); memset(rx, 0, sizeof(*rx)); } } /* * Error strings */ size_t regerror(int err, const regex_t *rx, char *dst, size_t dstlen) { static const char errlist[][9] = { "NOERROR", /* 0 */ "NOMATCH", /* 1 */ "BADBR", /* 2 */ "BADPAT", /* 3 */ "BADRPT", /* 4 */ "EBRACE", /* 5 */ "EBRACK", /* 6 */ "ECOLLATE", /* 7 */ "ECTYPE", /* 8 */ "EESCAPE", /* 9 */ "EPAREN", /* 10 */ "ERANGE", /* 11 */ "ESPACE", /* 12 */ "ESUBREG", /* 13 */ }; const char *s = "EUNKNOWN"; if ((size_t)err < ARRAY_NELEM(errlist)) s = errlist[err]; return snprintf(dst, dstlen, "%s", s); } #endif 
/* !USE_SYSTEM_REGEX */ skytools-3.2.6/lib/usual/mbuf.c0000644000000000000000000000117412166266754013315 0ustar /* * Safe and easy access to memory buffer. */ #include bool mbuf_make_room(struct MBuf *buf, unsigned len) { unsigned new_alloc = buf->alloc_len; void *ptr; /* is it a dynamic buffer */ if (buf->reader || buf->fixed) return false; /* maybe there is enough room already */ if (buf->write_pos + len <= buf->alloc_len) return true; if (new_alloc == 0) new_alloc = 128; /* calc new alloc size */ while (new_alloc < buf->write_pos + len) new_alloc *= 2; /* realloc */ ptr = realloc(buf->data, new_alloc); if (!ptr) return false; buf->data = ptr; buf->alloc_len = new_alloc; return true; } skytools-3.2.6/lib/usual/heap.h0000644000000000000000000000542312166266754013307 0ustar /* * Binary Heap. * * Copyright (c) 2009 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** @file * Binary heap. * * Binary heap is sort of binary tree held inside array, * with following 2 properties: * - heap property: each node is "better" than it's childs. * - shape property: binary tree is complete, meaning all levels * except the last one are fully filled. 
* * Instead of "min"- or "max"-heap, this is "best"-heap, * as it operates with user-defined heap_is_better() functions, * which is used to bubble elements on top. */ #ifndef _USUAL_HEAP_H_ #define _USUAL_HEAP_H_ #include /** * Object comparision function. * * Should return true if a needs to reach top before b, * false if not or equal. */ typedef bool (*heap_is_better_f)(const void *a, const void *b); /** * Heap position storage. * * If user wants to delete elements from the middle of heap, * this function should be used to keep track where the element * is located. */ typedef void (*heap_save_pos_f)(void *a, unsigned pos); /** * Heap object. */ struct Heap; /** * Create new heap object. * * @param is_better_cb Callback to decide priority. * @param save_pos_cb Callback to store current index. * @param cx Allocation context. */ struct Heap *heap_create( heap_is_better_f is_better_cb, heap_save_pos_f save_pos_cb, CxMem *cx); /** Release memory allocated by heap */ void heap_destroy(struct Heap *h); /** Put new object into heap */ bool heap_push(struct Heap *h, void *ptr); /** Remove and return topmost object from heap */ void *heap_pop(struct Heap *h); /** Return topmost object in heap */ void *heap_top(struct Heap *h); /** Remove and return any object from heap by index */ void *heap_remove(struct Heap *h, unsigned pos); /** * Reserve room for more elements. * * Returns false if allocation failed. */ bool heap_reserve(struct Heap *h, unsigned extra); /** Return number of objects in heap */ unsigned heap_size(struct Heap *h); /* Return object by index, for testing */ void *heap_get_obj(struct Heap *h, unsigned pos); #endif skytools-3.2.6/lib/usual/pthread.c0000644000000000000000000000405712166266754014016 0ustar /* * Pthreads compat. 
* * Copyright (c) 2007-2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #ifndef HAVE_PTHREAD_H #ifdef WIN32 /* * basic pthreads for win32. */ struct _w32thread { void *(*fn)(void *); void *arg; }; static DWORD WINAPI w32launcher(LPVOID arg) { struct _w32thread *info = arg; info->fn(info->arg); free(info); return 0; } int pthread_create(pthread_t *t, pthread_attr_t *attr, void *(*fn)(void *), void *arg) { struct _w32thread *info = calloc(1, sizeof(*info)); if (!info) return -1; info->fn = fn; info->arg = arg; *t = CreateThread(NULL, 0, w32launcher, info, 0, NULL); if (*t == NULL) return -1; return 0; } int pthread_join(pthread_t *t, void **ret) { if (WaitForSingleObject(*t, INFINITE) != WAIT_OBJECT_0) return -1; CloseHandle(*t); return 0; } int pthread_mutex_init(pthread_mutex_t *lock, void *unused) { *lock = CreateMutex(NULL, FALSE, NULL); if (*lock == NULL) return -1; return 0; } int pthread_mutex_destroy(pthread_mutex_t *lock) { if (*lock) { CloseHandle(*lock); *lock = NULL; } return 0; } int pthread_mutex_lock(pthread_mutex_t *lock) { if (WaitForSingleObject(*lock, INFINITE) != WAIT_OBJECT_0) return -1; return 0; } int pthread_mutex_unlock(pthread_mutex_t *lock) { if (!ReleaseMutex(*lock)) return -1; return 0; } #endif /* win32 */ #endif /* !HAVE_PTHREAD_H */ 
skytools-3.2.6/lib/usual/logging.h0000644000000000000000000001166412166266754014024 0ustar /* * Copyright (c) 2007-2009 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** * @file * * Logging framework for unix services. * * * Supported outputs: * - syslog * - log file * - stderr * * @section logging_prefix Logging context * * It is possible to pass context info to all logging calls * and later add details to log lines or to filter based on it. * * Each call references 2 macros: * - LOG_CONTEXT_DEF - which can define/call any variables * - LOG_CONTEXT - which should return a pointer variable. * * Later, global callback function \ref logging_prefix_cb * will get this pointer with destination buffer and can either * add more info for log line or tell to skip logging this message. */ #ifndef _USUAL_LOGGING_H_ #define _USUAL_LOGGING_H_ #include /* internal log levels */ enum LogLevel { LG_FATAL = 0, LG_ERROR = 1, LG_WARNING = 2, LG_STATS = 3, LG_INFO = 4, LG_DEBUG = 5, LG_NOISE = 6, }; #ifndef LOG_CONTEXT_DEF /** Example: Prepare dummy context pointer */ #define LOG_CONTEXT_DEF void *_log_ctx = NULL #endif #ifndef LOG_CONTEXT /** Example: Reference dummy context pointer */ #define LOG_CONTEXT _log_ctx #endif /** * Signature for logging_prefix_cb. 
Return value is either added string length in dst * or negative value to skip logging. */ typedef int (*logging_prefix_fn_t)(enum LogLevel lev, void *ctx, char *dst, unsigned int dstlen); /** * Optional global callback for each log line. * * It can either add info to log message or skip logging it. */ extern logging_prefix_fn_t logging_prefix_cb; /** * Global verbosity level. * * 0 - show only info level msgs (default) * 1 - show debug msgs (log_debug) * 2 - show noise msgs (log_noise) */ extern int cf_verbose; /** * Toggle logging to stderr. Default: 1. * daemon.c turns this off if goes to background */ extern int cf_quiet; /** * Logfile location, default NULL */ extern const char *cf_logfile; /** Syslog on/off */ extern int cf_syslog; /** ident for syslog, if NULL syslog is disabled (default) */ extern const char *cf_syslog_ident; /** Facility name */ extern const char *cf_syslog_facility; /** Max log level for syslog writer */ extern enum LogLevel cf_syslog_level; /** Max log level for logfile writer */ extern enum LogLevel cf_logfile_level; /** Max log level for stderr writer */ extern enum LogLevel cf_stderr_level; /* * Internal API. */ /* non-fatal logging */ void log_generic(enum LogLevel level, void *ctx, const char *s, ...) _PRINTF(3, 4); /* this is also defined in base.h for Assert() */ void log_fatal(const char *file, int line, const char *func, bool show_perror, void *ctx, const char *s, ...) _PRINTF(6, 7); /* * Public API */ /** Log error message */ #define log_error(...) do { LOG_CONTEXT_DEF; \ log_generic(LG_ERROR, LOG_CONTEXT, __VA_ARGS__); \ } while (0) /** Log warning message */ #define log_warning(...) do { LOG_CONTEXT_DEF; \ log_generic(LG_WARNING, LOG_CONTEXT, __VA_ARGS__); \ } while (0) /** Log stats (liveness) message */ #define log_stats(...) do { LOG_CONTEXT_DEF; \ log_generic(LG_STATS, LOG_CONTEXT, __VA_ARGS__); \ } while (0) /** Log info message */ #define log_info(...) 
do { LOG_CONTEXT_DEF; \ log_generic(LG_INFO, LOG_CONTEXT, __VA_ARGS__); \ } while (0) /** Log debug message */ #define log_debug(...) do { LOG_CONTEXT_DEF; \ if (unlikely(cf_verbose > 0)) \ log_generic(LG_DEBUG, LOG_CONTEXT, __VA_ARGS__); \ } while (0) /** Log debug noise */ #define log_noise(...) do { LOG_CONTEXT_DEF; \ if (unlikely(cf_verbose > 1)) \ log_generic(LG_NOISE, LOG_CONTEXT, __VA_ARGS__); \ } while (0) /** Log and die. It also logs source location */ #define fatal(...) do { LOG_CONTEXT_DEF; \ log_fatal(__FILE__, __LINE__, __func__, false, LOG_CONTEXT, __VA_ARGS__); \ exit(1); } while (0) /** Log strerror and die. Error message also includes strerror(errno) */ #define fatal_perror(...) do { LOG_CONTEXT_DEF; \ log_fatal(__FILE__, __LINE__, __func__, true, LOG_CONTEXT, __VA_ARGS__); \ exit(1); } while (0) /** Less verbose fatal() */ #define die(...) do { LOG_CONTEXT_DEF; \ log_generic(LG_FATAL, LOG_CONTEXT, __VA_ARGS__); \ exit(1); } while (0) /** * Close open logfiles and syslog. * * Useful when rotating log files. */ void reset_logging(void); #endif skytools-3.2.6/lib/usual/cxextra.h0000644000000000000000000000304212166266754014043 0ustar /* * Copyright (c) 2007-2009 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ /** * @file * * Extra allocators for cxalloc. */ #ifndef _USUAL_CXEXTRA_H_ #define _USUAL_CXEXTRA_H_ #include /** Allocator that exits on error. .ctx should be pointer to actual allocator */ extern const struct CxOps cx_nofail_ops; /** nofail for libc */ extern CxMem cx_libc_nofail; /** * Creates allocator that pools all memory together, * without keeping track of single objects, to be * freed all together in one shot. * * realloc(), free() are partially supported for the last * objec only. */ CxMem *cx_new_pool(CxMem *parent); /** * Creates allocator that remebers all allocations done * under it and allows all of it to be freed together. * * Supports hierarchical trees. */ CxMem *cx_new_tree(CxMem *parent); #endif skytools-3.2.6/lib/usual/mdict.h0000644000000000000000000000555712166266754013502 0ustar /* * Copyright (c) 2007-2009 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** @file * * Minimal dict. 
*/ #ifndef _USUAL_MDICT_H_ #define _USUAL_MDICT_H_ #include #include /** Dict reference */ struct MDict; /** Create new emtpy dict */ struct MDict *mdict_new(CxMem *cx); /** Free dict */ void mdict_free(struct MDict *dict); /** Get value as MBuf from string */ const struct MBuf *mdict_get_buf(struct MDict *dict, const char *key, unsigned klen); /** Get value from dict */ const char *mdict_get_str(struct MDict *dict, const char *key, unsigned klen); /** Put string to dict */ bool mdict_put_str(struct MDict *dict, const char *key, unsigned klen, const char *val, unsigned vlen); /** Remove a key from dict */ bool mdict_del_key(struct MDict *dict, const char *key, unsigned klen); /** Signature for walker callback */ typedef bool (*mdict_walker_f)(void *arg, const struct MBuf *k, const struct MBuf *v); /** Walk over dict */ bool mdict_walk(struct MDict *dict, mdict_walker_f cb_func, void *cb_arg); /* * Simple API that calculates strlen inline. */ /** Get value from dict */ static inline const char *mdict_get(struct MDict *dict, const char *key) { return mdict_get_str(dict, key, strlen(key)); } /** Put zero-terminated key and value to dict */ static inline bool mdict_put(struct MDict *dict, const char *key, const char *val) { unsigned klen = strlen(key); unsigned vlen = val ? strlen(val) : 0; return mdict_put_str(dict, key, klen, val, vlen); } /** Put MBuf to dict */ static inline bool mdict_put_buf(struct MDict *dict, const char *key, const struct MBuf *buf) { unsigned klen = strlen(key); const char *val = buf ? mbuf_data(buf) : NULL; unsigned vlen = buf ? 
mbuf_written(buf) : 0; return mdict_put_str(dict, key, klen, val, vlen); } /** Remove value from dict */ static inline bool mdict_del(struct MDict *dict, const char *key) { return mdict_del_key(dict, key, strlen(key)); } /** Urldecode string and add keys with values to dict */ bool mdict_urldecode(struct MDict *dict, const char *str, unsigned len); /** Urlencode dict to string */ bool mdict_urlencode(struct MDict *dict, struct MBuf *dst); #endif skytools-3.2.6/lib/usual/err.h0000644000000000000000000000375012166266754013163 0ustar /* * Cmdline error reporting. * * Copyright (c) 2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** * @file * * Error printing for command-line utilities. */ #ifndef _USUAL_ERR_H_ #define _USUAL_ERR_H_ #include #ifdef HAVE_ERR_H #include #endif #ifndef HAVE_ERR /** Print formatted message and strerror(errno) to stderr and exit with given error code */ void err(int e, const char *fmt, ...) _PRINTF(2, 3); #endif #ifndef HAVE_ERRX /** Print formatted message to stderr and exit with given error code */ void errx(int e, const char *fmt, ...) _PRINTF(2, 3); #endif #ifndef HAVE_WARN /** Print formatted message and strerror(errno) to stderr */ void warn(const char *fmt, ...) 
_PRINTF(1, 2); #endif #ifndef HAVE_WARNX /** Print formatted message to stderr */ void warnx(const char *fmt, ...) _PRINTF(1, 2); #endif #ifndef HAVE_SETPROGNAME /** Set program name to that will printed as prefix to error messages */ void setprogname(const char *s); #endif #ifndef HAVE_GETPROGNAME /** Return program name set with @ref setprogname */ const char *getprogname(void); #endif /** Malloc that exits on failure */ void *xmalloc(size_t len); /** Realloc that exits on failure */ void *xrealloc(void *p, size_t len); /** strdup that exits on failure */ char *xstrdup(const char *s); #endif skytools-3.2.6/lib/usual/signal.h0000644000000000000000000000503412166266754013645 0ustar /* * Copyright (c) 2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** @file * Signals compat. * * general * - sigaction() -> signal() * * win32: * - SIGALRM, alarm(), signal(SIGALRM), sigaction(SIGALRM) * - kill(pid, 0) */ #ifndef _USUAL_SIGNAL_H_ #define _USUAL_SIGNAL_H_ #include #include /* * Compat sigval, detect based on siginfo_t.si_code. 
*/ #if !defined(SI_QUEUE) && !defined(HAVE_SIGQUEUE) union sigval { int sival_int; void *sival_ptr; }; #endif /* * Compat sigevent */ #ifndef SIGEV_NONE #define SIGEV_NONE 0 #define SIGEV_SIGNAL 1 #define SIGEV_THREAD 2 struct sigevent { int sigev_notify; int sigev_signo; union sigval sigev_value; void (*sigev_notify_function)(union sigval); }; #endif /* * Compat sigaction() */ #ifndef HAVE_SIGACTION #define SA_SIGINFO 1 #define SA_RESTART 2 typedef struct siginfo_t siginfo_t; struct sigaction { union { void (*sa_handler)(int); void (*sa_sigaction)(int, siginfo_t *, void *); }; int sa_flags; int sa_mask; }; #define sigemptyset(s) #define sigfillset(s) #define sigaddset(s, sig) #define sigdelset(s, sig) #define sigaction(a,b,c) compat_sigaction(a,b,c) int sigaction(int sig, const struct sigaction *sa, struct sigaction *old); #endif /* * win32 compat: * kill(), alarm, SIGALRM */ #ifdef WIN32 #define SIGALRM 1023 #define SIGBUS 1022 unsigned alarm(unsigned); int kill(int pid, int sig); typedef void (*_sighandler_t)(int); static inline _sighandler_t wrap_signal(int sig, _sighandler_t func) { /* sigaction has custom handling for SIGALRM */ if (sig == SIGALRM) { struct sigaction sa, oldsa; sa.sa_handler = func; sa.sa_flags = sa.sa_mask = 0; sigaction(SIGALRM, &sa, &oldsa); return oldsa.sa_handler; } else if (sig == SIGBUS) { return NULL; } return signal(sig, func); } #define signal(a,b) wrap_signal(a,b) #endif #endif skytools-3.2.6/lib/usual/fnmatch.h0000644000000000000000000000320112166266754014002 0ustar /* * fnmatch.h * * Copyright (c) 2012 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** * \file * Theme include for strings. */ #ifndef _USUAL_FNMATCH_H_ #define _USUAL_FNMATCH_H_ #include #ifdef HAVE_FNMATCH_H #include #else #define NEED_USUAL_FNMATCH #endif #ifdef NEED_USUAL_FNMATCH #define fnmatch(p,s,f) usual_fnmatch(p,s,f) /** Do not allow wildcard to match '/' */ #define FNM_PATHNAME 1 /** Treat '\\' as literal value */ #define FNM_NOESCAPE 2 /** Do not allow wildcard to match leading '.' */ #define FNM_PERIOD 4 /** (GNU) Match case-insensitively */ #define FNM_CASEFOLD 8 /** (GNU) Match leading directory in path */ #define FNM_LEADING_DIR 16 /* (GNU) random alias */ #define FNM_FILE_NAME FNM_PATHNAME /** Returned on no match */ #define FNM_NOMATCH 1 /** * Compat: fnmatch() */ int fnmatch(const char *pat, const char *str, int flags); #endif /* NEED_USUAL_FNMATCH */ #endif /* !_USUAL_FNMATCH_H_ */ skytools-3.2.6/lib/usual/cbtree.h0000644000000000000000000000442212166266754013634 0ustar /* * Copyright (c) 2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** @file * * Crit-bit tree / binary radix tree. */ #ifndef _USUAL_CBTREE_H_ #define _USUAL_CBTREE_H_ #include /** returns length of the key */ typedef unsigned int (*cbtree_getkey_func)(void *ctx, void *obj, const void **dst_p); /** walk over tree */ typedef bool (*cbtree_walker_func)(void *ctx, void *obj); /** Handle to tree */ struct CBTree; /** * Create new tree. * * @param obj_key_cb callback to get the key for a object * @param obj_free_cb callback to free the object when tree node is freed (optional) * @param cb_ctx extra pointer passed to callbacks * @param cx memory context where from allocate */ struct CBTree *cbtree_create(cbtree_getkey_func obj_key_cb, cbtree_walker_func obj_free_cb, void *cb_ctx, CxMem *cx); /** * frees all resources allocated. * If obj_free_cb is non-NULL, it will be called per each object. */ void cbtree_destroy(struct CBTree *tree); /** Inserts new node to tree */ bool cbtree_insert(struct CBTree *tree, void *obj) _MUSTCHECK; /** Removed node from tree. * If obj_free_cb is non-NULL, it will be called for the object. * * @returns true if key was found, false otherwise. */ bool cbtree_delete(struct CBTree *tree, const void *key, unsigned klen); /** * Lookup a key. 
* * @returns object pointer if found, NULL ohterwise */ void *cbtree_lookup(struct CBTree *tree, const void *key, unsigned klen); /** Walk over tree */ bool cbtree_walk(struct CBTree *tree, cbtree_walker_func cb_func, void *cb_arg); #endif skytools-3.2.6/lib/usual/mempool.h0000644000000000000000000000217112166266754014037 0ustar /* * Copyright (c) 2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** * @file * Simple memory pool for variable-length allocations. */ #ifndef _USUAL_MEMPOOL_H_ #define _USUAL_MEMPOOL_H_ #include /** Pool Reference */ struct MemPool; /** Allocate from pool */ void *mempool_alloc(struct MemPool **pool, unsigned size) _MALLOC; /** Release all memory in pool */ void mempool_destroy(struct MemPool **pool); #endif skytools-3.2.6/lib/usual/crypto/0000755000000000000000000000000012166266754013535 5ustar skytools-3.2.6/lib/usual/crypto/md5.h0000644000000000000000000000264012166266754014375 0ustar /* * MD5 implementation based on RFC1321. * * Copyright (c) 2008 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. 
* * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** * @file * * MD5 cryptographic hash. */ #ifndef _USUAL_CRYPTO_MD5_H_ #define _USUAL_CRYPTO_MD5_H_ #include /** Block length for MD5 */ #define MD5_BLOCK_LENGTH 64 /** Result length for MD5 */ #define MD5_DIGEST_LENGTH 16 /** MD5 state */ struct md5_ctx { uint64_t nbytes; uint32_t a, b, c, d; uint32_t buf[16]; }; /** Clean state */ void md5_reset(struct md5_ctx *ctx); /** Update state with more data */ void md5_update(struct md5_ctx *ctx, const void *data, unsigned int len); /** Get final result */ void md5_final(struct md5_ctx *ctx, uint8_t *dst); #endif skytools-3.2.6/lib/usual/crypto/sha1.h0000644000000000000000000000264612166266754014552 0ustar /* * SHA1 implementation based on RFC3174. * * Copyright (c) 2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ /** * @file * * SHA1 implementation. */ #ifndef _USUAL_CRYPTO_SHA1_H_ #define _USUAL_CRYPTO_SHA1_H_ #include /** Block length for SHA1 */ #define SHA1_BLOCK_SIZE 64 /** Result length for SHA1 */ #define SHA1_DIGEST_LENGTH 20 /** SHA1 state */ struct sha1_ctx { uint64_t nbytes; uint32_t a, b, c, d, e; uint32_t buf[SHA1_BLOCK_SIZE / 4]; }; /** Clean state */ void sha1_reset(struct sha1_ctx *ctx); /** Update state with more data */ void sha1_update(struct sha1_ctx *ctx, const void *data, unsigned int len); /** Get final result */ void sha1_final(struct sha1_ctx *ctx, uint8_t *dst); #endif skytools-3.2.6/lib/usual/crypto/sha512.h0000644000000000000000000000301612166266754014711 0ustar /* * SHA2-512 implementation based on FIPS180-2. * * Copyright (c) 2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #ifndef _USUAL_CRYPTO_SHA512_H_ #define _USUAL_CRYPTO_SHA512_H_ #include #define SHA384_BLOCK_SIZE (16*8) #define SHA512_BLOCK_SIZE (16*8) #define SHA384_DIGEST_LENGTH (384/8) #define SHA512_DIGEST_LENGTH (512/8) struct sha512_ctx { union { uint64_t words[16]; uint8_t raw[16 * 8]; } buf; uint64_t state[8]; uint64_t nbytes; }; void sha512_reset(struct sha512_ctx *ctx); void sha512_update(struct sha512_ctx *ctx, const void *data, unsigned int len); void sha512_final(struct sha512_ctx *ctx, uint8_t *dst); void sha384_reset(struct sha512_ctx *ctx); void sha384_update(struct sha512_ctx *ctx, const void *data, unsigned int len); void sha384_final(struct sha512_ctx *ctx, uint8_t *dst); #endif skytools-3.2.6/lib/usual/crypto/sha512.c0000644000000000000000000001705312166266754014712 0ustar /* * SHA2-512 implementation based on FIPS180-2. * * Copyright (c) 2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include #include #include #include /* repeat with increasing offset */ #define R4(R, t) R(t+0); R(t+1); R(t+2); R(t+3) #define R16(R, t) R4(R, t+0); R4(R, t+4); R4(R, t+8); R4(R, t+12) #define R64(R, t) R16(R, t+0); R16(R, t+16); R16(R, t+32); R16(R, t+48); #define bufpos(ctx) ((ctx)->nbytes & (SHA512_BLOCK_SIZE - 1)) /* * initial values */ static const uint64_t H384[8] = { UINT64_C(0xcbbb9d5dc1059ed8), UINT64_C(0x629a292a367cd507), UINT64_C(0x9159015a3070dd17), UINT64_C(0x152fecd8f70e5939), UINT64_C(0x67332667ffc00b31), UINT64_C(0x8eb44a8768581511), UINT64_C(0xdb0c2e0d64f98fa7), UINT64_C(0x47b5481dbefa4fa4), }; static const uint64_t H512[8] = { UINT64_C(0x6a09e667f3bcc908), UINT64_C(0xbb67ae8584caa73b), UINT64_C(0x3c6ef372fe94f82b), UINT64_C(0xa54ff53a5f1d36f1), UINT64_C(0x510e527fade682d1), UINT64_C(0x9b05688c2b3e6c1f), UINT64_C(0x1f83d9abfb41bd6b), UINT64_C(0x5be0cd19137e2179), }; /* * constants for mixing */ static const uint64_t K[80] = { UINT64_C(0x428a2f98d728ae22), UINT64_C(0x7137449123ef65cd), UINT64_C(0xb5c0fbcfec4d3b2f), UINT64_C(0xe9b5dba58189dbbc), UINT64_C(0x3956c25bf348b538), UINT64_C(0x59f111f1b605d019), UINT64_C(0x923f82a4af194f9b), UINT64_C(0xab1c5ed5da6d8118), UINT64_C(0xd807aa98a3030242), UINT64_C(0x12835b0145706fbe), UINT64_C(0x243185be4ee4b28c), UINT64_C(0x550c7dc3d5ffb4e2), UINT64_C(0x72be5d74f27b896f), UINT64_C(0x80deb1fe3b1696b1), UINT64_C(0x9bdc06a725c71235), UINT64_C(0xc19bf174cf692694), UINT64_C(0xe49b69c19ef14ad2), UINT64_C(0xefbe4786384f25e3), UINT64_C(0x0fc19dc68b8cd5b5), UINT64_C(0x240ca1cc77ac9c65), UINT64_C(0x2de92c6f592b0275), UINT64_C(0x4a7484aa6ea6e483), UINT64_C(0x5cb0a9dcbd41fbd4), UINT64_C(0x76f988da831153b5), UINT64_C(0x983e5152ee66dfab), UINT64_C(0xa831c66d2db43210), UINT64_C(0xb00327c898fb213f), UINT64_C(0xbf597fc7beef0ee4), UINT64_C(0xc6e00bf33da88fc2), UINT64_C(0xd5a79147930aa725), UINT64_C(0x06ca6351e003826f), UINT64_C(0x142929670a0e6e70), UINT64_C(0x27b70a8546d22ffc), UINT64_C(0x2e1b21385c26c926), 
UINT64_C(0x4d2c6dfc5ac42aed), UINT64_C(0x53380d139d95b3df), UINT64_C(0x650a73548baf63de), UINT64_C(0x766a0abb3c77b2a8), UINT64_C(0x81c2c92e47edaee6), UINT64_C(0x92722c851482353b), UINT64_C(0xa2bfe8a14cf10364), UINT64_C(0xa81a664bbc423001), UINT64_C(0xc24b8b70d0f89791), UINT64_C(0xc76c51a30654be30), UINT64_C(0xd192e819d6ef5218), UINT64_C(0xd69906245565a910), UINT64_C(0xf40e35855771202a), UINT64_C(0x106aa07032bbd1b8), UINT64_C(0x19a4c116b8d2d0c8), UINT64_C(0x1e376c085141ab53), UINT64_C(0x2748774cdf8eeb99), UINT64_C(0x34b0bcb5e19b48a8), UINT64_C(0x391c0cb3c5c95a63), UINT64_C(0x4ed8aa4ae3418acb), UINT64_C(0x5b9cca4f7763e373), UINT64_C(0x682e6ff3d6b2b8a3), UINT64_C(0x748f82ee5defb2fc), UINT64_C(0x78a5636f43172f60), UINT64_C(0x84c87814a1f0ab72), UINT64_C(0x8cc702081a6439ec), UINT64_C(0x90befffa23631e28), UINT64_C(0xa4506cebde82bde9), UINT64_C(0xbef9a3f7b2c67915), UINT64_C(0xc67178f2e372532b), UINT64_C(0xca273eceea26619c), UINT64_C(0xd186b8c721c0c207), UINT64_C(0xeada7dd6cde0eb1e), UINT64_C(0xf57d4f7fee6ed178), UINT64_C(0x06f067aa72176fba), UINT64_C(0x0a637dc5a2c898a6), UINT64_C(0x113f9804bef90dae), UINT64_C(0x1b710b35131c471b), UINT64_C(0x28db77f523047d84), UINT64_C(0x32caab7b40c72493), UINT64_C(0x3c9ebe0a15c9bebc), UINT64_C(0x431d67c49c100d4c), UINT64_C(0x4cc5d4becb3e42b6), UINT64_C(0x597f299cfc657e2a), UINT64_C(0x5fcb6fab3ad6faec), UINT64_C(0x6c44198c4a475817), }; /* * mixing */ #define CH(x,y,z) ((x & y) ^ ((~x) & z)) #define MAJ(x,y,z) ((x & y) ^ (x & z) ^ (y & z)) #define E0(x) (ror64(x, 28) ^ ror64(x, 34) ^ ror64(x, 39)) #define E1(x) (ror64(x, 14) ^ ror64(x, 18) ^ ror64(x, 41)) #define O0(x) (ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7)) #define O1(x) (ror64(x, 19) ^ ror64(x, 61) ^ (x >> 6)) #define W(n) (ctx->buf.words[(n) & 15]) #define setW(n,v) W(n) = (v) #define SHA512_ROUND(_t) do { \ uint64_t tmp1, tmp2, t = (_t); \ if (t >= 16) { \ setW(t, O1(W(t - 2)) + W(t - 7) + O0(W(t - 15)) + W(t - 16)); \ } else { \ /* convert endianess on first go */ \ setW(t, 
be64toh(W(t))); \ } \ tmp1 = h + E1(e) + CH(e,f,g) + K[k_pos++] + W(t); \ tmp2 = E0(a) + MAJ(a,b,c); \ h = g; g = f; f = e; e = d + tmp1; d = c; c = b; b = a; a = tmp1 + tmp2; \ } while (0) /* * actual core */ static void sha512_core(struct sha512_ctx *ctx) { uint64_t *state = ctx->state; uint64_t a = state[0], b = state[1], c = state[2], d = state[3]; uint64_t e = state[4], f = state[5], g = state[6], h = state[7]; unsigned k_pos = 0; R16(SHA512_ROUND, 0); while (k_pos < 80) { R16(SHA512_ROUND, 16); } state[0] += a; state[1] += b; state[2] += c; state[3] += d; state[4] += e; state[5] += f; state[6] += g; state[7] += h; } /* * Public API for SHA512. */ void sha512_reset(struct sha512_ctx *ctx) { memset(ctx, 0, sizeof(*ctx)); memcpy(ctx->state, H512, sizeof(H512)); } void sha512_update(struct sha512_ctx *ctx, const void *data, unsigned int len) { unsigned int n; const uint8_t *src = data; uint8_t *dst = ctx->buf.raw; while (len > 0) { n = SHA512_BLOCK_SIZE - bufpos(ctx); if (n > len) n = len; memcpy(dst + bufpos(ctx), src, n); src += n; len -= n; ctx->nbytes += n; if (bufpos(ctx) == 0) sha512_core(ctx); } } void sha512_final(struct sha512_ctx *ctx, uint8_t *dst) { static const uint8_t padding[SHA512_BLOCK_SIZE] = { 0x80 }; uint64_t nbits = ctx->nbytes * 8; int i, pad_len; /* add padding */ pad_len = SHA512_BLOCK_SIZE - 16 - bufpos(ctx); if (pad_len <= 0) pad_len += SHA512_BLOCK_SIZE; sha512_update(ctx, padding, pad_len); /* add length */ ctx->buf.words[14] = 0; ctx->buf.words[15] = htobe64(nbits); /* final result */ sha512_core(ctx); for (i = 0; i < SHA512_DIGEST_LENGTH / 8; i++) be64enc(dst + i*8, ctx->state[i]); } /* * Public API for SHA384. 
*/ void sha384_reset(struct sha512_ctx *ctx) { memset(ctx, 0, sizeof(*ctx)); memcpy(ctx->state, H384, sizeof(H384)); } void sha384_update(struct sha512_ctx *ctx, const void *data, unsigned int len) { sha512_update(ctx, data, len); } void sha384_final(struct sha512_ctx *ctx, uint8_t *dst) { uint8_t buf[SHA512_DIGEST_LENGTH]; sha512_final(ctx, buf); memcpy(dst, buf, SHA384_DIGEST_LENGTH); memset(buf, 0, sizeof(buf)); } /* * DigestInfo */ const struct DigestInfo *digest_SHA384(void) { static const struct DigestInfo info = { (DigestInitFunc *)sha384_reset, (DigestUpdateFunc *)sha384_update, (DigestFinalFunc *)sha384_final, sizeof(struct sha512_ctx), SHA384_DIGEST_LENGTH, SHA384_BLOCK_SIZE }; return &info; } const struct DigestInfo *digest_SHA512(void) { static const struct DigestInfo info = { (DigestInitFunc *)sha512_reset, (DigestUpdateFunc *)sha512_update, (DigestFinalFunc *)sha512_final, sizeof(struct sha512_ctx), SHA512_DIGEST_LENGTH, SHA512_BLOCK_SIZE }; return &info; } skytools-3.2.6/lib/usual/crypto/sha1.c0000644000000000000000000000740112166266754014537 0ustar /* * SHA1 implementation based on RFC3174. * * Copyright (c) 2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #define bufpos(ctx) ((ctx)->nbytes & (SHA1_BLOCK_SIZE - 1)) /* * SHA1 core. 
*/ #define W(n) (buf[(n) & 15]) #define setW(n, val) W(n) = val /* base SHA1 operation */ #define SHA1OP(_t, fn, K) do { \ uint32_t tmp, t = (_t); \ if (t >= 16) { \ tmp = W(t - 3) ^ W(t - 8) ^ W(t - 14) ^ W(t - 16); \ setW(t, rol32(tmp, 1)); \ } else { \ /* convert endianess on first go */ \ setW(t, be32toh(W(t))); \ } \ tmp = rol32(a, 5) + fn(b, c, d) + e + W(t) + K; \ e = d; d = c; c = rol32(b, 30); b = a; a = tmp; \ } while (0) /* mix functions */ #define F0(b, c, d) (d ^ (b & (c ^ d))) #define F1(b, c, d) (b ^ c ^ d) #define F2(b, c, d) ((b & c) | (b & d) | (c & d)) #define F3(b, c, d) (b ^ c ^ d) /* operation details for each round */ #define SHA1R0(t) SHA1OP(t, F0, 0x5a827999) #define SHA1R1(t) SHA1OP(t, F1, 0x6ed9eba1) #define SHA1R2(t) SHA1OP(t, F2, 0x8f1bbcdc) #define SHA1R3(t) SHA1OP(t, F3, 0xca62c1d6) /* repeat with increasing offset */ #define R4(R, t) R(t+0); R(t+1); R(t+2); R(t+3) #define R16(R, t) R4(R, t+0); R4(R, t+4); R4(R, t+8); R4(R, t+12) #define R20(R, t) R16(R, t+0); R4(R, t+16) static void sha1_core(struct sha1_ctx * ctx, uint32_t *buf) { uint32_t a, b, c, d, e; a = ctx->a; b = ctx->b; c = ctx->c; d = ctx->d; e = ctx->e; R20(SHA1R0, 0); R20(SHA1R1, 20); R20(SHA1R2, 40); R20(SHA1R3, 60); ctx->a += a; ctx->b += b; ctx->c += c; ctx->d += d; ctx->e += e; } /* * Public API. 
*/ void sha1_reset(struct sha1_ctx *ctx) { ctx->nbytes = 0; ctx->a = 0x67452301; ctx->b = 0xefcdab89; ctx->c = 0x98badcfe; ctx->d = 0x10325476; ctx->e = 0xc3d2e1f0; } void sha1_update(struct sha1_ctx *ctx, const void *data, unsigned int len) { unsigned int n; const uint8_t *src = data; uint8_t *dst = (uint8_t *)ctx->buf; while (len > 0) { n = SHA1_BLOCK_SIZE - bufpos(ctx); if (n > len) n = len; memcpy(dst + bufpos(ctx), src, n); src += n; len -= n; ctx->nbytes += n; if (bufpos(ctx) == 0) sha1_core(ctx, ctx->buf); } } void sha1_final(struct sha1_ctx *ctx, uint8_t *dst) { static const uint8_t padding[SHA1_BLOCK_SIZE] = { 0x80 }; uint64_t nbits = ctx->nbytes * 8; int pad_len, pos = bufpos(ctx); /* add padding */ pad_len = SHA1_BLOCK_SIZE - 8 - pos; if (pad_len <= 0) pad_len += SHA1_BLOCK_SIZE; sha1_update(ctx, padding, pad_len); /* add length */ ctx->buf[14] = htobe32(nbits >> 32); ctx->buf[15] = htobe32(nbits); /* final result */ sha1_core(ctx, ctx->buf); be32enc(dst + 0*4, ctx->a); be32enc(dst + 1*4, ctx->b); be32enc(dst + 2*4, ctx->c); be32enc(dst + 3*4, ctx->d); be32enc(dst + 4*4, ctx->e); } /* * DigestInfo */ static const struct DigestInfo sha1_info = { (DigestInitFunc *)sha1_reset, (DigestUpdateFunc *)sha1_update, (DigestFinalFunc *)sha1_final, sizeof(struct sha1_ctx), SHA1_DIGEST_LENGTH, SHA1_BLOCK_SIZE }; const struct DigestInfo *digest_SHA1(void) { return &sha1_info; } skytools-3.2.6/lib/usual/crypto/digest.h0000644000000000000000000000607012166266754015170 0ustar /* * Common API for cryptographic digests. * * Copyright (c) 2012 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** * @file * * Common API for cryptographic digests. */ #ifndef _USUAL_CRYPTO_DIGEST_H_ #define _USUAL_CRYPTO_DIGEST_H_ #include typedef void (DigestInitFunc)(void *ctx); typedef void (DigestUpdateFunc)(void *ctx, const void *, unsigned); typedef void (DigestFinalFunc)(void *ctx, uint8_t *); /** * Algoright info. */ struct DigestInfo { DigestInitFunc *init; DigestUpdateFunc *update; DigestFinalFunc *final; short state_len; short result_len; short block_len; }; /** * Algoright instance. */ struct DigestContext; /** * Allocate and initialize new algorithm instance. */ struct DigestContext *digest_new(const struct DigestInfo *impl, CxMem *cx); /** Hash more data */ void digest_update(struct DigestContext *ctx, const void *data, size_t len); /** * Get final result. * * To re-use same instance, digest_reset() must be called first. */ void digest_final(struct DigestContext *ctx, uint8_t *res); /** * Prepares instance for new data. */ void digest_reset(struct DigestContext *ctx); /** * Free instance. */ void digest_free(struct DigestContext *ctx); /** * Hash function block length in bytes. */ unsigned digest_block_len(struct DigestContext *ctx); /** * Hash function result length in bytes. */ unsigned digest_result_len(struct DigestContext *ctx); /* * Declare algorithm info's here instead per-also headers * to avoid unnecessary dependencies. 
*/ /** MD5 message digest */ const struct DigestInfo *digest_MD5(void); /** SHA1 message digest */ const struct DigestInfo *digest_SHA1(void); /** SHA224 message digest */ const struct DigestInfo *digest_SHA224(void); /** SHA256 message digest */ const struct DigestInfo *digest_SHA256(void); /** SHA384 message digest */ const struct DigestInfo *digest_SHA384(void); /** SHA512 message digest */ const struct DigestInfo *digest_SHA512(void); /** Keccak-224 message digest */ const struct DigestInfo *digest_KECCAK224(void); /** Keccak-256 message digest */ const struct DigestInfo *digest_KECCAK256(void); /** Keccak-384 message digest */ const struct DigestInfo *digest_KECCAK384(void); /** Keccak-512 message digest */ const struct DigestInfo *digest_KECCAK512(void); /** Keccak in arbitrary output length mode */ const struct DigestInfo *digest_KECCAK_STREAM(void); #endif skytools-3.2.6/lib/usual/crypto/hmac.h0000644000000000000000000000274112166266754014622 0ustar /* * HMAC implementation based on OpenBSD * * Copyright (c) 2012 Daniel Farina * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** * @file * HMAC-SHA1 implementation (RFC2104). 
*/ #ifndef _USUAL_CRYPTO_HMAC_H_ #define _USUAL_CRYPTO_HMAC_H_ #include /** HMAC Context */ struct HMAC; /** Create context with key */ struct HMAC *hmac_new(const struct DigestInfo *impl, const void *key, unsigned int key_len, CxMem *cx); /** Initialize context */ void hmac_reset(struct HMAC *ctx); /** Hash more data */ void hmac_update(struct HMAC *ctx, const void *data, unsigned int len); /** Get final result */ void hmac_final(struct HMAC *ctx, uint8_t *dst); unsigned hmac_block_len(struct HMAC *ctx); unsigned hmac_result_len(struct HMAC *ctx); #endif /* _USUAL_HMAC_H_ */ skytools-3.2.6/lib/usual/crypto/sha256.h0000644000000000000000000000301612166266754014716 0ustar /* * SHA2-256 implementation based on FIPS180-2. * * Copyright (c) 2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #ifndef _USUAL_CRYPTO_SHA256_H_ #define _USUAL_CRYPTO_SHA256_H_ #include #define SHA224_BLOCK_SIZE (16*4) #define SHA256_BLOCK_SIZE (16*4) #define SHA224_DIGEST_LENGTH (224/8) #define SHA256_DIGEST_LENGTH (256/8) struct sha256_ctx { union { uint32_t words[16]; uint8_t raw[16 * 4]; } buf; uint32_t state[8]; uint64_t nbytes; }; void sha256_reset(struct sha256_ctx *ctx); void sha256_update(struct sha256_ctx *ctx, const void *data, unsigned int len); void sha256_final(struct sha256_ctx *ctx, uint8_t *dst); void sha224_reset(struct sha256_ctx *ctx); void sha224_update(struct sha256_ctx *ctx, const void *data, unsigned int len); void sha224_final(struct sha256_ctx *ctx, uint8_t *dst); #endif skytools-3.2.6/lib/usual/crypto/md5.c0000644000000000000000000001304612166266754014372 0ustar /* * MD5 implementation based on RFC1321. * * Copyright (c) 2008 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include /* * Support functions. */ #define bufpos(ctx) ((ctx)->nbytes & (MD5_BLOCK_LENGTH - 1)) static inline void swap_words(uint32_t *w, int n) { #ifdef WORDS_BIGENDIAN for (; n > 0; w++, n--) *w = le32toh(*w); #endif } /* * MD5 core. 
*/ #define F(X,Y,Z) ((X & Y) | ((~X) & Z)) #define G(X,Y,Z) ((X & Z) | (Y & (~Z))) #define H(X,Y,Z) (X ^ Y ^ Z) #define I(X,Y,Z) (Y ^ (X | (~Z))) #define OP(fn, a, b, c, d, k, s, T_i) \ a = b + rol32(a + fn(b, c, d) + X[k] + T_i, s) static void md5_mix(struct md5_ctx *ctx, const uint32_t *X) { uint32_t a, b, c, d; a = ctx->a; b = ctx->b; c = ctx->c; d = ctx->d; /* Round 1. */ OP(F, a, b, c, d, 0, 7, 0xd76aa478); OP(F, d, a, b, c, 1, 12, 0xe8c7b756); OP(F, c, d, a, b, 2, 17, 0x242070db); OP(F, b, c, d, a, 3, 22, 0xc1bdceee); OP(F, a, b, c, d, 4, 7, 0xf57c0faf); OP(F, d, a, b, c, 5, 12, 0x4787c62a); OP(F, c, d, a, b, 6, 17, 0xa8304613); OP(F, b, c, d, a, 7, 22, 0xfd469501); OP(F, a, b, c, d, 8, 7, 0x698098d8); OP(F, d, a, b, c, 9, 12, 0x8b44f7af); OP(F, c, d, a, b, 10, 17, 0xffff5bb1); OP(F, b, c, d, a, 11, 22, 0x895cd7be); OP(F, a, b, c, d, 12, 7, 0x6b901122); OP(F, d, a, b, c, 13, 12, 0xfd987193); OP(F, c, d, a, b, 14, 17, 0xa679438e); OP(F, b, c, d, a, 15, 22, 0x49b40821); /* Round 2. */ OP(G, a, b, c, d, 1, 5, 0xf61e2562); OP(G, d, a, b, c, 6, 9, 0xc040b340); OP(G, c, d, a, b, 11, 14, 0x265e5a51); OP(G, b, c, d, a, 0, 20, 0xe9b6c7aa); OP(G, a, b, c, d, 5, 5, 0xd62f105d); OP(G, d, a, b, c, 10, 9, 0x02441453); OP(G, c, d, a, b, 15, 14, 0xd8a1e681); OP(G, b, c, d, a, 4, 20, 0xe7d3fbc8); OP(G, a, b, c, d, 9, 5, 0x21e1cde6); OP(G, d, a, b, c, 14, 9, 0xc33707d6); OP(G, c, d, a, b, 3, 14, 0xf4d50d87); OP(G, b, c, d, a, 8, 20, 0x455a14ed); OP(G, a, b, c, d, 13, 5, 0xa9e3e905); OP(G, d, a, b, c, 2, 9, 0xfcefa3f8); OP(G, c, d, a, b, 7, 14, 0x676f02d9); OP(G, b, c, d, a, 12, 20, 0x8d2a4c8a); /* Round 3. 
*/ OP(H, a, b, c, d, 5, 4, 0xfffa3942); OP(H, d, a, b, c, 8, 11, 0x8771f681); OP(H, c, d, a, b, 11, 16, 0x6d9d6122); OP(H, b, c, d, a, 14, 23, 0xfde5380c); OP(H, a, b, c, d, 1, 4, 0xa4beea44); OP(H, d, a, b, c, 4, 11, 0x4bdecfa9); OP(H, c, d, a, b, 7, 16, 0xf6bb4b60); OP(H, b, c, d, a, 10, 23, 0xbebfbc70); OP(H, a, b, c, d, 13, 4, 0x289b7ec6); OP(H, d, a, b, c, 0, 11, 0xeaa127fa); OP(H, c, d, a, b, 3, 16, 0xd4ef3085); OP(H, b, c, d, a, 6, 23, 0x04881d05); OP(H, a, b, c, d, 9, 4, 0xd9d4d039); OP(H, d, a, b, c, 12, 11, 0xe6db99e5); OP(H, c, d, a, b, 15, 16, 0x1fa27cf8); OP(H, b, c, d, a, 2, 23, 0xc4ac5665); /* Round 4. */ OP(I, a, b, c, d, 0, 6, 0xf4292244); OP(I, d, a, b, c, 7, 10, 0x432aff97); OP(I, c, d, a, b, 14, 15, 0xab9423a7); OP(I, b, c, d, a, 5, 21, 0xfc93a039); OP(I, a, b, c, d, 12, 6, 0x655b59c3); OP(I, d, a, b, c, 3, 10, 0x8f0ccc92); OP(I, c, d, a, b, 10, 15, 0xffeff47d); OP(I, b, c, d, a, 1, 21, 0x85845dd1); OP(I, a, b, c, d, 8, 6, 0x6fa87e4f); OP(I, d, a, b, c, 15, 10, 0xfe2ce6e0); OP(I, c, d, a, b, 6, 15, 0xa3014314); OP(I, b, c, d, a, 13, 21, 0x4e0811a1); OP(I, a, b, c, d, 4, 6, 0xf7537e82); OP(I, d, a, b, c, 11, 10, 0xbd3af235); OP(I, c, d, a, b, 2, 15, 0x2ad7d2bb); OP(I, b, c, d, a, 9, 21, 0xeb86d391); ctx->a += a; ctx->b += b; ctx->c += c; ctx->d += d; } /* * Public API. 
*/ void md5_reset(struct md5_ctx *ctx) { ctx->nbytes = 0; ctx->a = 0x67452301; ctx->b = 0xefcdab89; ctx->c = 0x98badcfe; ctx->d = 0x10325476; } void md5_update(struct md5_ctx *ctx, const void *data, unsigned int len) { unsigned int n; const uint8_t *ptr = data; uint8_t *buf = (uint8_t *)ctx->buf; while (len > 0) { n = MD5_BLOCK_LENGTH - bufpos(ctx); if (n > len) n = len; memcpy(buf + bufpos(ctx), ptr, n); ptr += n; len -= n; ctx->nbytes += n; if (bufpos(ctx) == 0) { swap_words(ctx->buf, 16); md5_mix(ctx, ctx->buf); } } } void md5_final(struct md5_ctx *ctx, uint8_t *dst) { static const uint8_t padding[MD5_BLOCK_LENGTH] = { 0x80 }; uint64_t final_len = ctx->nbytes * 8; int pad_len, pos = bufpos(ctx); /* add padding */ pad_len = MD5_BLOCK_LENGTH - 8 - pos; if (pad_len <= 0) pad_len += MD5_BLOCK_LENGTH; md5_update(ctx, padding, pad_len); /* add length directly */ swap_words(ctx->buf, 14); ctx->buf[14] = final_len; ctx->buf[15] = final_len >> 32; /* final result */ md5_mix(ctx, ctx->buf); le32enc(dst + 0, ctx->a); le32enc(dst + 4, ctx->b); le32enc(dst + 8, ctx->c); le32enc(dst + 12, ctx->d); } /* * DigestInfo */ static const struct DigestInfo md5 = { (DigestInitFunc *)md5_reset, (DigestUpdateFunc *)md5_update, (DigestFinalFunc *)md5_final, sizeof(struct md5_ctx), MD5_DIGEST_LENGTH, MD5_BLOCK_LENGTH }; const struct DigestInfo *digest_MD5(void) { return &md5; } skytools-3.2.6/lib/usual/crypto/hmac.c0000644000000000000000000000536612166266754014623 0ustar /* * HMAC implementation based on OpenBSD hmac.c * * Copyright (c) 2012 Daniel Farina * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include struct HMAC { struct DigestContext *hash; CxMem *cx; uint8_t *ipad; uint8_t *opad; }; struct HMAC *hmac_new(const struct DigestInfo *impl, const void *key, unsigned int key_len, CxMem *cx) { struct DigestContext *hash; struct HMAC *hmac; unsigned bs = impl->block_len; unsigned i; /* load hash */ hash = digest_new(impl, cx); if (!hash) return NULL; /* struct setup */ hmac = cx_alloc0(cx, sizeof(struct HMAC) + 2*bs); if (!hmac) { digest_free(hash); return NULL; } hmac->hash = hash; hmac->cx = cx; hmac->ipad = (uint8_t *)(hmac + 1); hmac->opad = hmac->ipad + bs; /* copy key to pads */ if (key_len > bs) { digest_update(hash, key, key_len); digest_final(hash, hmac->ipad); digest_reset(hash); memcpy(hmac->opad, hmac->ipad, digest_result_len(hash)); } else { memcpy(hmac->ipad, key, key_len); memcpy(hmac->opad, key, key_len); } /* calculate pads */ for (i = 0; i < bs; i++) { hmac->ipad[i] ^= 0x36; hmac->opad[i] ^= 0x5c; } /* prepare for user data */ digest_update(hmac->hash, hmac->ipad, bs); return hmac; } /* Clean HMAC state */ void hmac_reset(struct HMAC *ctx) { unsigned bs = digest_block_len(ctx->hash); digest_reset(ctx->hash); digest_update(ctx->hash, ctx->ipad, bs); } /* Update HMAC state with more data */ void hmac_update(struct HMAC *ctx, const void *data, unsigned int len) { digest_update(ctx->hash, data, len); } /* Get final HMAC result */ void hmac_final(struct HMAC *ctx, uint8_t *dst) { unsigned bs = digest_block_len(ctx->hash); unsigned rs = digest_result_len(ctx->hash); digest_final(ctx->hash, dst); digest_reset(ctx->hash); digest_update(ctx->hash, ctx->opad, bs); digest_update(ctx->hash, dst, rs); 
digest_final(ctx->hash, dst); } unsigned hmac_block_len(struct HMAC *ctx) { return digest_block_len(ctx->hash); } unsigned hmac_result_len(struct HMAC *ctx) { return digest_result_len(ctx->hash); } skytools-3.2.6/lib/usual/crypto/keccak.h0000644000000000000000000000646312166266754015140 0ustar /* * Keccak implementation for SHA3 parameters. * * Copyright (c) 2012 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** @file * Keccak with SHA3 parameters. */ #ifndef _USUAL_CRYPTO_KECCAK_H_ #define _USUAL_CRYPTO_KECCAK_H_ #include /* * SHA3 fixed-length output modes. */ /** Output length for 224-bit mode (in bytes) */ #define KECCAK224_DIGEST_LENGTH (224/8) /** Output length for 256-bit mode (in bytes) */ #define KECCAK256_DIGEST_LENGTH (256/8) /** Output length for 384-bit mode (in bytes) */ #define KECCAK384_DIGEST_LENGTH (384/8) /** Output length for 512-bit mode (in bytes) */ #define KECCAK512_DIGEST_LENGTH (512/8) /** Number of data bytes processed in one loop. */ #define KECCAK224_BLOCK_SIZE (1152/8) /** Number of data bytes processed in one loop. */ #define KECCAK256_BLOCK_SIZE (1088/8) /** Number of data bytes processed in one loop. */ #define KECCAK384_BLOCK_SIZE (832/8) /** Number of data bytes processed in one loop. 
*/ #define KECCAK512_BLOCK_SIZE (576/8) /** Number of data bytes processed in one go. */ #define KECCAK_STREAM_BLOCK_SIZE (1024/8) /** * Output length for stream mode (in bytes). * * This means output from single call to keccak_final(). * It can be called repeatedly to get more output. */ #define KECCAK_STREAM_DIGEST_LENGTH KECCAK_STREAM_BLOCK_SIZE /** * Keccak state structure for all modes. */ struct KeccakContext { /* 5*5*64 bit state */ union { uint64_t state64[25]; uint32_t state32[2*25]; } u; uint16_t bytes; /* current number of bytes in buffer */ uint16_t rbytes; /* number of bytes in one step */ uint16_t obytes; /* output bytes */ uint16_t padded; /* is padding added? */ }; /** SHA3 fixed length output mode. */ void keccak224_init(struct KeccakContext *ctx); /** SHA3 fixed length output mode. */ void keccak256_init(struct KeccakContext *ctx); /** SHA3 fixed length output mode. */ void keccak384_init(struct KeccakContext *ctx); /** SHA3 fixed length output mode. */ void keccak512_init(struct KeccakContext *ctx); /** * SHA3 stream mode for Keccak. * * In stream mode, keccak_final() can be called repeatedly * to get output stream of unlimited length. * * On each call it outputs next 128 bytes (1024 bits). */ void keccak_stream_init(struct KeccakContext *ctx); /** * Hash additional data. */ void keccak_update(struct KeccakContext *ctx, const void *data, unsigned int len); /** * Return final result. * * Output length depends on mode. See KECCAK*_DIGEST_LENGTH * constants to get length for particular mode. * * In stream mode, this can be called repeatedly. */ void keccak_final(struct KeccakContext *ctx, uint8_t *dst); #endif skytools-3.2.6/lib/usual/crypto/digest.c0000644000000000000000000000365312166266754015167 0ustar /* * Common API for cryptographic digests. 
* * Copyright (c) 2012 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include struct DigestContext { const struct DigestInfo *impl; CxMem *cx; uint64_t state[1]; }; struct DigestContext *digest_new(const struct DigestInfo *impl, CxMem *cx) { struct DigestContext *ctx; unsigned alloc; alloc = offsetof(struct DigestContext, state) + impl->state_len; ctx = cx_alloc(cx, alloc); if (!ctx) return NULL; ctx->impl = impl; ctx->cx = cx; impl->init(ctx->state); return ctx; } void digest_update(struct DigestContext *ctx, const void *data, size_t len) { ctx->impl->update(ctx->state, data, len); } void digest_final(struct DigestContext *ctx, uint8_t *res) { ctx->impl->final(ctx->state, res); } void digest_reset(struct DigestContext *ctx) { ctx->impl->init(ctx->state); } void digest_free(struct DigestContext *ctx) { CxMem *cx = ctx->cx; unsigned alloc = offsetof(struct DigestContext, state) + ctx->impl->state_len; memset(ctx, 0, alloc); cx_free(cx, ctx); } unsigned digest_block_len(struct DigestContext *ctx) { return ctx->impl->block_len; } unsigned digest_result_len(struct DigestContext *ctx) { return ctx->impl->result_len; } skytools-3.2.6/lib/usual/crypto/sha256.c0000644000000000000000000001327412166266754014720 0ustar /* * SHA2-256 implementation based on FIPS180-2. 
* Copyright (c) 2009 Marko Kreen
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* NOTE(review): the #include targets below were lost in source extraction;
 * restore the header names from upstream before compiling. */
#include
#include
#include
#include

/* repeat with increasing offset - unrolls a round macro R 4/16/64 times */
#define R4(R, t) R(t+0); R(t+1); R(t+2); R(t+3)
#define R16(R, t) R4(R, t+0); R4(R, t+4); R4(R, t+8); R4(R, t+12)
#define R64(R, t) R16(R, t+0); R16(R, t+16); R16(R, t+32); R16(R, t+48);

/* current fill of the 64-byte input buffer (block size is a power of two) */
#define bufpos(ctx) ((ctx)->nbytes & (SHA256_BLOCK_SIZE - 1))

/*
 * initial values
 *
 * H224/H256 are the initial hash values for SHA-224 and SHA-256
 * (FIPS 180 sections 5.3.2 / 5.3.3).
 */

static const uint32_t H224[8] = {
	0xc1059ed8, 0x367cd507, 0x3070dd17, 0xf70e5939,
	0xffc00b31, 0x68581511, 0x64f98fa7, 0xbefa4fa4,
};

static const uint32_t H256[8] = {
	0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
	0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
};

/*
 * constants for mixing - the 64 round constants K[t] of FIPS 180
 */

static const uint32_t K[64] = {
	0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
	0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
	0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
	0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
	0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
	0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
	0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
	0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
};

/*
 * mixing - the Ch/Maj choice functions and the Sigma/sigma rotate-xor
 * functions of FIPS 180, built on 32-bit right rotates
 */

#define CH(x,y,z) ((x & y) ^ ((~x) & z))
#define MAJ(x,y,z) ((x & y) ^ (x & z) ^ (y & z))

#define E0(x) (ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22))
#define E1(x) (ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25))
#define O0(x) (ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3))
#define O1(x) (ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10))

/* message schedule kept in the 16-word input buffer, indexed mod 16 */
#define W(n) (ctx->buf.words[(n) & 15])
#define setW(n,v) W(n) = (v)

/*
 * One SHA-256 round: extend the message schedule (or byte-swap the raw
 * input on the first 16 rounds), then update the 8 working variables.
 */
#define SHA256_ROUND(_t) do { \
	uint32_t tmp1, tmp2, t = (_t); \
	if (t >= 16) { \
		setW(t, O1(W(t - 2)) + W(t - 7) + O0(W(t - 15)) + W(t - 16)); \
	} else { \
		/* convert endianness on first go */ \
		setW(t, be32toh(W(t))); \
	} \
	tmp1 = h + E1(e) + CH(e,f,g) + K[k_pos++] + W(t); \
	tmp2 = E0(a) + MAJ(a,b,c); \
	h = g; g = f; f = e; e = d + tmp1; d = c; c = b; b = a; a = tmp1 + tmp2; \
} while (0)

/*
 * actual core - compress one 64-byte block from ctx->buf into ctx->state
 */

static void sha256_core(struct sha256_ctx *ctx)
{
	uint32_t *state = ctx->state;
	uint32_t a = state[0], b = state[1], c = state[2], d = state[3];
	uint32_t e = state[4], f = state[5], g = state[6], h = state[7];
	unsigned k_pos = 0;

	/* rounds 0..15 read raw input; remaining 48 rounds extend the schedule */
	R16(SHA256_ROUND, 0);
	while (k_pos < 64) {
		R16(SHA256_ROUND, 16);
	}

	/* add compressed chunk into current hash value */
	state[0] += a; state[1] += b; state[2] += c; state[3] += d;
	state[4] += e; state[5] += f; state[6] += g; state[7] += h;
}

/*
 * Public API for SHA256.
*/

/* Start a fresh SHA-256 calculation: zero the context, load SHA-256 IV. */
void sha256_reset(struct sha256_ctx *ctx)
{
	memset(ctx, 0, sizeof(*ctx));
	memcpy(ctx->state, H256, sizeof(H256));
}

/*
 * Hash more data.  Buffers input into 64-byte blocks and runs
 * sha256_core() each time the buffer fills exactly.
 */
void sha256_update(struct sha256_ctx *ctx, const void *data, unsigned int len)
{
	unsigned int n;
	const uint8_t *src = data;
	uint8_t *dst = ctx->buf.raw;

	while (len > 0) {
		/* room left in the current block */
		n = SHA256_BLOCK_SIZE - bufpos(ctx);
		if (n > len)
			n = len;

		memcpy(dst + bufpos(ctx), src, n);
		src += n;
		len -= n;
		ctx->nbytes += n;

		/* bufpos wrapped to 0 -> a full block was just completed */
		if (bufpos(ctx) == 0)
			sha256_core(ctx);
	}
}

/*
 * Finish the hash: apply FIPS 180 padding (0x80, zeros, 64-bit bit length)
 * and write the 32-byte big-endian digest to dst.  Context is not reusable
 * afterwards without sha256_reset().
 */
void sha256_final(struct sha256_ctx *ctx, uint8_t *dst)
{
	static const uint8_t padding[SHA256_BLOCK_SIZE] = { 0x80 };
	/* NOTE(review): assumes ctx->nbytes is a 64-bit counter (declared in a
	 * header not visible here); otherwise the *8 could overflow - confirm. */
	uint64_t nbits = ctx->nbytes * 8;
	int pad_len, pos = bufpos(ctx);
	int i;

	/* add padding - at least 1 byte, leaving 8 bytes for the length */
	pad_len = SHA256_BLOCK_SIZE - 8 - pos;
	if (pad_len <= 0)
		pad_len += SHA256_BLOCK_SIZE;
	sha256_update(ctx, padding, pad_len);

	/* add length (big-endian bit count) in the last two words */
	ctx->buf.words[14] = htobe32(nbits >> 32);
	ctx->buf.words[15] = htobe32(nbits);

	/* final result */
	sha256_core(ctx);
	for (i = 0; i < SHA256_DIGEST_LENGTH / 4; i++)
		be32enc(dst + i*4, ctx->state[i]);
}

/*
 * Public API for SHA224.
*/

/* Start a fresh SHA-224 calculation: same engine as SHA-256, SHA-224 IV. */
void sha224_reset(struct sha256_ctx *ctx)
{
	memset(ctx, 0, sizeof(*ctx));
	memcpy(ctx->state, H224, sizeof(H224));
}

/* SHA-224 input processing is identical to SHA-256. */
void sha224_update(struct sha256_ctx *ctx, const void *data, unsigned int len)
{
	sha256_update(ctx, data, len);
}

/*
 * Finish SHA-224: run the full SHA-256 finalization, then keep only the
 * first 28 bytes.  The scratch buffer is wiped as it holds digest state.
 */
void sha224_final(struct sha256_ctx *ctx, uint8_t *dst)
{
	uint8_t buf[SHA256_DIGEST_LENGTH];

	sha256_final(ctx, buf);
	memcpy(dst, buf, SHA224_DIGEST_LENGTH);
	memset(buf, 0, sizeof(buf));
}

/*
 * DigestInfo - static descriptors plugging these functions into the
 * generic digest_new() API in digest.c.
 */

const struct DigestInfo *digest_SHA224(void)
{
	static const struct DigestInfo info = {
		(DigestInitFunc *)sha224_reset,
		(DigestUpdateFunc *)sha224_update,
		(DigestFinalFunc *)sha224_final,
		sizeof(struct sha256_ctx),
		SHA224_DIGEST_LENGTH,
		SHA224_BLOCK_SIZE
	};
	return &info;
}

const struct DigestInfo *digest_SHA256(void)
{
	static const struct DigestInfo info = {
		(DigestInitFunc *)sha256_reset,
		(DigestUpdateFunc *)sha256_update,
		(DigestFinalFunc *)sha256_final,
		sizeof(struct sha256_ctx),
		SHA256_DIGEST_LENGTH,
		SHA256_BLOCK_SIZE
	};
	return &info;
}
skytools-3.2.6/lib/usual/crypto/keccak.c0000644000000000000000000010131412166266754015122 0ustar /*
 * Keccak implementation for SHA3 parameters.
 *
 * Copyright (c) 2012 Marko Kreen
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/

/*
 * Based on public-domain Keccak-inplace.c and Keccak-inplace32BI.c
 * implementations from Keccak reference code:
 *
 * The Keccak sponge function, designed by Guido Bertoni, Joan Daemen,
 * Michaël Peeters and Gilles Van Assche. For more information, feedback or
 * questions, please refer to our website: http://keccak.noekeon.org/
 *
 * Implementation by Ronny Van Keer and the designers,
 * hereby denoted as "the implementer".
 *
 * To the extent possible under law, the implementer has waived all copyright
 * and related or neighboring rights to the source code in this file.
 * http://creativecommons.org/publicdomain/zero/1.0/
 *
 * 32-bit word interlacing algorithm:
 *
 * Henry S. Warren, Hacker's Delight, Addison-Wesley, 2002
 */

/* NOTE(review): the #include targets below were lost in source extraction;
 * restore the header names from upstream before compiling. */
#include
#include
#include
#include
#include
#include

/*
 * Decide whether to use 64- or 32-bit implementation.
 * Either can be forced by pre-defining KECCAK_64BIT or KECCAK_32BIT.
 */
#if !defined(KECCAK_64BIT) && !defined(KECCAK_32BIT)
/* If neither is defined, try to autodetect */
#if (LONG_MAX > 0xFFFFFFFF) || (UINTPTR_MAX > 0xFFFFFFFF)
/* use 64-bit implementation if 'long' or 'uintptr_t' is 64-bit */
#define KECCAK_64BIT
#else
/* otherwise, use 32-bit implementation */
#define KECCAK_32BIT
#endif
#endif

/* For SHA3 variant of Keccak */
#define KECCAK_ROUNDS 24

#ifdef KECCAK_64BIT

/*
 * 64-bit implementation - one lane is one 64-bit word.
*/ /* round constants */ static const uint64_t RoundConstants64[KECCAK_ROUNDS] = { UINT64_C(0x0000000000000001), UINT64_C(0x0000000000008082), UINT64_C(0x800000000000808A), UINT64_C(0x8000000080008000), UINT64_C(0x000000000000808B), UINT64_C(0x0000000080000001), UINT64_C(0x8000000080008081), UINT64_C(0x8000000000008009), UINT64_C(0x000000000000008A), UINT64_C(0x0000000000000088), UINT64_C(0x0000000080008009), UINT64_C(0x000000008000000A), UINT64_C(0x000000008000808B), UINT64_C(0x800000000000008B), UINT64_C(0x8000000000008089), UINT64_C(0x8000000000008003), UINT64_C(0x8000000000008002), UINT64_C(0x8000000000000080), UINT64_C(0x000000000000800A), UINT64_C(0x800000008000000A), UINT64_C(0x8000000080008081), UINT64_C(0x8000000000008080), UINT64_C(0x0000000080000001), UINT64_C(0x8000000080008008), }; static void keccak_f(struct KeccakContext *ctx) { uint64_t *state = ctx->u.state64; uint64_t Ba, Be, Bi, Bo, Bu; uint64_t Ca, Ce, Ci, Co, Cu; uint64_t Da, De, Di, Do, Du; int i; #define Aba state[ 0] #define Abe state[ 1] #define Abi state[ 2] #define Abo state[ 3] #define Abu state[ 4] #define Aga state[ 5] #define Age state[ 6] #define Agi state[ 7] #define Ago state[ 8] #define Agu state[ 9] #define Aka state[10] #define Ake state[11] #define Aki state[12] #define Ako state[13] #define Aku state[14] #define Ama state[15] #define Ame state[16] #define Ami state[17] #define Amo state[18] #define Amu state[19] #define Asa state[20] #define Ase state[21] #define Asi state[22] #define Aso state[23] #define Asu state[24] for (i = 0; i < KECCAK_ROUNDS; i += 4) { /* Code for 4 rounds */ Ca = Aba^Aga^Aka^Ama^Asa; Ce = Abe^Age^Ake^Ame^Ase; Ci = Abi^Agi^Aki^Ami^Asi; Co = Abo^Ago^Ako^Amo^Aso; Cu = Abu^Agu^Aku^Amu^Asu; Da = Cu^rol64(Ce, 1); De = Ca^rol64(Ci, 1); Di = Ce^rol64(Co, 1); Do = Ci^rol64(Cu, 1); Du = Co^rol64(Ca, 1); Ba = (Aba^Da); Be = rol64((Age^De), 44); Bi = rol64((Aki^Di), 43); Bo = rol64((Amo^Do), 21); Bu = rol64((Asu^Du), 14); Aba = Ba ^((~Be)& Bi ); Aba ^= 
RoundConstants64[i+0]; Age = Be ^((~Bi)& Bo ); Aki = Bi ^((~Bo)& Bu ); Amo = Bo ^((~Bu)& Ba ); Asu = Bu ^((~Ba)& Be ); Bi = rol64((Aka^Da), 3); Bo = rol64((Ame^De), 45); Bu = rol64((Asi^Di), 61); Ba = rol64((Abo^Do), 28); Be = rol64((Agu^Du), 20); Aka = Ba ^((~Be)& Bi ); Ame = Be ^((~Bi)& Bo ); Asi = Bi ^((~Bo)& Bu ); Abo = Bo ^((~Bu)& Ba ); Agu = Bu ^((~Ba)& Be ); Bu = rol64((Asa^Da), 18); Ba = rol64((Abe^De), 1); Be = rol64((Agi^Di), 6); Bi = rol64((Ako^Do), 25); Bo = rol64((Amu^Du), 8); Asa = Ba ^((~Be)& Bi ); Abe = Be ^((~Bi)& Bo ); Agi = Bi ^((~Bo)& Bu ); Ako = Bo ^((~Bu)& Ba ); Amu = Bu ^((~Ba)& Be ); Be = rol64((Aga^Da), 36); Bi = rol64((Ake^De), 10); Bo = rol64((Ami^Di), 15); Bu = rol64((Aso^Do), 56); Ba = rol64((Abu^Du), 27); Aga = Ba ^((~Be)& Bi ); Ake = Be ^((~Bi)& Bo ); Ami = Bi ^((~Bo)& Bu ); Aso = Bo ^((~Bu)& Ba ); Abu = Bu ^((~Ba)& Be ); Bo = rol64((Ama^Da), 41); Bu = rol64((Ase^De), 2); Ba = rol64((Abi^Di), 62); Be = rol64((Ago^Do), 55); Bi = rol64((Aku^Du), 39); Ama = Ba ^((~Be)& Bi ); Ase = Be ^((~Bi)& Bo ); Abi = Bi ^((~Bo)& Bu ); Ago = Bo ^((~Bu)& Ba ); Aku = Bu ^((~Ba)& Be ); Ca = Aba^Aka^Asa^Aga^Ama; Ce = Age^Ame^Abe^Ake^Ase; Ci = Aki^Asi^Agi^Ami^Abi; Co = Amo^Abo^Ako^Aso^Ago; Cu = Asu^Agu^Amu^Abu^Aku; Da = Cu^rol64(Ce, 1); De = Ca^rol64(Ci, 1); Di = Ce^rol64(Co, 1); Do = Ci^rol64(Cu, 1); Du = Co^rol64(Ca, 1); Ba = (Aba^Da); Be = rol64((Ame^De), 44); Bi = rol64((Agi^Di), 43); Bo = rol64((Aso^Do), 21); Bu = rol64((Aku^Du), 14); Aba = Ba ^((~Be)& Bi ); Aba ^= RoundConstants64[i+1]; Ame = Be ^((~Bi)& Bo ); Agi = Bi ^((~Bo)& Bu ); Aso = Bo ^((~Bu)& Ba ); Aku = Bu ^((~Ba)& Be ); Bi = rol64((Asa^Da), 3); Bo = rol64((Ake^De), 45); Bu = rol64((Abi^Di), 61); Ba = rol64((Amo^Do), 28); Be = rol64((Agu^Du), 20); Asa = Ba ^((~Be)& Bi ); Ake = Be ^((~Bi)& Bo ); Abi = Bi ^((~Bo)& Bu ); Amo = Bo ^((~Bu)& Ba ); Agu = Bu ^((~Ba)& Be ); Bu = rol64((Ama^Da), 18); Ba = rol64((Age^De), 1); Be = rol64((Asi^Di), 6); Bi = rol64((Ako^Do), 25); Bo = rol64((Abu^Du), 8); 
Ama = Ba ^((~Be)& Bi ); Age = Be ^((~Bi)& Bo ); Asi = Bi ^((~Bo)& Bu ); Ako = Bo ^((~Bu)& Ba ); Abu = Bu ^((~Ba)& Be ); Be = rol64((Aka^Da), 36); Bi = rol64((Abe^De), 10); Bo = rol64((Ami^Di), 15); Bu = rol64((Ago^Do), 56); Ba = rol64((Asu^Du), 27); Aka = Ba ^((~Be)& Bi ); Abe = Be ^((~Bi)& Bo ); Ami = Bi ^((~Bo)& Bu ); Ago = Bo ^((~Bu)& Ba ); Asu = Bu ^((~Ba)& Be ); Bo = rol64((Aga^Da), 41); Bu = rol64((Ase^De), 2); Ba = rol64((Aki^Di), 62); Be = rol64((Abo^Do), 55); Bi = rol64((Amu^Du), 39); Aga = Ba ^((~Be)& Bi ); Ase = Be ^((~Bi)& Bo ); Aki = Bi ^((~Bo)& Bu ); Abo = Bo ^((~Bu)& Ba ); Amu = Bu ^((~Ba)& Be ); Ca = Aba^Asa^Ama^Aka^Aga; Ce = Ame^Ake^Age^Abe^Ase; Ci = Agi^Abi^Asi^Ami^Aki; Co = Aso^Amo^Ako^Ago^Abo; Cu = Aku^Agu^Abu^Asu^Amu; Da = Cu^rol64(Ce, 1); De = Ca^rol64(Ci, 1); Di = Ce^rol64(Co, 1); Do = Ci^rol64(Cu, 1); Du = Co^rol64(Ca, 1); Ba = (Aba^Da); Be = rol64((Ake^De), 44); Bi = rol64((Asi^Di), 43); Bo = rol64((Ago^Do), 21); Bu = rol64((Amu^Du), 14); Aba = Ba ^((~Be)& Bi ); Aba ^= RoundConstants64[i+2]; Ake = Be ^((~Bi)& Bo ); Asi = Bi ^((~Bo)& Bu ); Ago = Bo ^((~Bu)& Ba ); Amu = Bu ^((~Ba)& Be ); Bi = rol64((Ama^Da), 3); Bo = rol64((Abe^De), 45); Bu = rol64((Aki^Di), 61); Ba = rol64((Aso^Do), 28); Be = rol64((Agu^Du), 20); Ama = Ba ^((~Be)& Bi ); Abe = Be ^((~Bi)& Bo ); Aki = Bi ^((~Bo)& Bu ); Aso = Bo ^((~Bu)& Ba ); Agu = Bu ^((~Ba)& Be ); Bu = rol64((Aga^Da), 18); Ba = rol64((Ame^De), 1); Be = rol64((Abi^Di), 6); Bi = rol64((Ako^Do), 25); Bo = rol64((Asu^Du), 8); Aga = Ba ^((~Be)& Bi ); Ame = Be ^((~Bi)& Bo ); Abi = Bi ^((~Bo)& Bu ); Ako = Bo ^((~Bu)& Ba ); Asu = Bu ^((~Ba)& Be ); Be = rol64((Asa^Da), 36); Bi = rol64((Age^De), 10); Bo = rol64((Ami^Di), 15); Bu = rol64((Abo^Do), 56); Ba = rol64((Aku^Du), 27); Asa = Ba ^((~Be)& Bi ); Age = Be ^((~Bi)& Bo ); Ami = Bi ^((~Bo)& Bu ); Abo = Bo ^((~Bu)& Ba ); Aku = Bu ^((~Ba)& Be ); Bo = rol64((Aka^Da), 41); Bu = rol64((Ase^De), 2); Ba = rol64((Agi^Di), 62); Be = rol64((Amo^Do), 55); Bi = rol64((Abu^Du), 
39); Aka = Ba ^((~Be)& Bi ); Ase = Be ^((~Bi)& Bo ); Agi = Bi ^((~Bo)& Bu ); Amo = Bo ^((~Bu)& Ba ); Abu = Bu ^((~Ba)& Be ); Ca = Aba^Ama^Aga^Asa^Aka; Ce = Ake^Abe^Ame^Age^Ase; Ci = Asi^Aki^Abi^Ami^Agi; Co = Ago^Aso^Ako^Abo^Amo; Cu = Amu^Agu^Asu^Aku^Abu; Da = Cu^rol64(Ce, 1); De = Ca^rol64(Ci, 1); Di = Ce^rol64(Co, 1); Do = Ci^rol64(Cu, 1); Du = Co^rol64(Ca, 1); Ba = (Aba^Da); Be = rol64((Abe^De), 44); Bi = rol64((Abi^Di), 43); Bo = rol64((Abo^Do), 21); Bu = rol64((Abu^Du), 14); Aba = Ba ^((~Be)& Bi ); Aba ^= RoundConstants64[i+3]; Abe = Be ^((~Bi)& Bo ); Abi = Bi ^((~Bo)& Bu ); Abo = Bo ^((~Bu)& Ba ); Abu = Bu ^((~Ba)& Be ); Bi = rol64((Aga^Da), 3); Bo = rol64((Age^De), 45); Bu = rol64((Agi^Di), 61); Ba = rol64((Ago^Do), 28); Be = rol64((Agu^Du), 20); Aga = Ba ^((~Be)& Bi ); Age = Be ^((~Bi)& Bo ); Agi = Bi ^((~Bo)& Bu ); Ago = Bo ^((~Bu)& Ba ); Agu = Bu ^((~Ba)& Be ); Bu = rol64((Aka^Da), 18); Ba = rol64((Ake^De), 1); Be = rol64((Aki^Di), 6); Bi = rol64((Ako^Do), 25); Bo = rol64((Aku^Du), 8); Aka = Ba ^((~Be)& Bi ); Ake = Be ^((~Bi)& Bo ); Aki = Bi ^((~Bo)& Bu ); Ako = Bo ^((~Bu)& Ba ); Aku = Bu ^((~Ba)& Be ); Be = rol64((Ama^Da), 36); Bi = rol64((Ame^De), 10); Bo = rol64((Ami^Di), 15); Bu = rol64((Amo^Do), 56); Ba = rol64((Amu^Du), 27); Ama = Ba ^((~Be)& Bi ); Ame = Be ^((~Bi)& Bo ); Ami = Bi ^((~Bo)& Bu ); Amo = Bo ^((~Bu)& Ba ); Amu = Bu ^((~Ba)& Be ); Bo = rol64((Asa^Da), 41); Bu = rol64((Ase^De), 2); Ba = rol64((Asi^Di), 62); Be = rol64((Aso^Do), 55); Bi = rol64((Asu^Du), 39); Asa = Ba ^((~Be)& Bi ); Ase = Be ^((~Bi)& Bo ); Asi = Bi ^((~Bo)& Bu ); Aso = Bo ^((~Bu)& Ba ); Asu = Bu ^((~Ba)& Be ); } } static inline void xor_lane(struct KeccakContext *ctx, int lane, uint64_t val) { ctx->u.state64[lane] ^= val; } static void extract(uint8_t *dst, const struct KeccakContext *ctx, int laneCount) { const uint64_t *src = ctx->u.state64; while (laneCount--) { le64enc(dst, *src++); dst += 8; } } #else /* KECCAK_32BIT */ /* * 32-bit implementation - one 64-bit lane is 
mapped * to two interleaved 32-bit words. */ static const uint32_t RoundConstants32[2*KECCAK_ROUNDS] = { 0x00000001, 0x00000000, 0x00000000, 0x00000089, 0x00000000, 0x8000008b, 0x00000000, 0x80008080, 0x00000001, 0x0000008b, 0x00000001, 0x00008000, 0x00000001, 0x80008088, 0x00000001, 0x80000082, 0x00000000, 0x0000000b, 0x00000000, 0x0000000a, 0x00000001, 0x00008082, 0x00000000, 0x00008003, 0x00000001, 0x0000808b, 0x00000001, 0x8000000b, 0x00000001, 0x8000008a, 0x00000001, 0x80000081, 0x00000000, 0x80000081, 0x00000000, 0x80000008, 0x00000000, 0x00000083, 0x00000000, 0x80008003, 0x00000001, 0x80008088, 0x00000000, 0x80000088, 0x00000001, 0x00008000, 0x00000000, 0x80008082, }; #define KeccakAtoD_round0() \ Cx = Abu0^Agu0^Aku0^Amu0^Asu0; \ Du1 = Abe1^Age1^Ake1^Ame1^Ase1; \ Da0 = Cx^rol32(Du1, 1); \ Cz = Abu1^Agu1^Aku1^Amu1^Asu1; \ Du0 = Abe0^Age0^Ake0^Ame0^Ase0; \ Da1 = Cz^Du0; \ \ Cw = Abi0^Agi0^Aki0^Ami0^Asi0; \ Do0 = Cw^rol32(Cz, 1); \ Cy = Abi1^Agi1^Aki1^Ami1^Asi1; \ Do1 = Cy^Cx; \ \ Cx = Aba0^Aga0^Aka0^Ama0^Asa0; \ De0 = Cx^rol32(Cy, 1); \ Cz = Aba1^Aga1^Aka1^Ama1^Asa1; \ De1 = Cz^Cw; \ \ Cy = Abo1^Ago1^Ako1^Amo1^Aso1; \ Di0 = Du0^rol32(Cy, 1); \ Cw = Abo0^Ago0^Ako0^Amo0^Aso0; \ Di1 = Du1^Cw; \ \ Du0 = Cw^rol32(Cz, 1); \ Du1 = Cy^Cx; #define KeccakAtoD_round1() \ Cx = Asu0^Agu0^Amu0^Abu1^Aku1; \ Du1 = Age1^Ame0^Abe0^Ake1^Ase1; \ Da0 = Cx^rol32(Du1, 1); \ Cz = Asu1^Agu1^Amu1^Abu0^Aku0; \ Du0 = Age0^Ame1^Abe1^Ake0^Ase0; \ Da1 = Cz^Du0; \ \ Cw = Aki1^Asi1^Agi0^Ami1^Abi0; \ Do0 = Cw^rol32(Cz, 1); \ Cy = Aki0^Asi0^Agi1^Ami0^Abi1; \ Do1 = Cy^Cx; \ \ Cx = Aba0^Aka1^Asa0^Aga0^Ama1; \ De0 = Cx^rol32(Cy, 1); \ Cz = Aba1^Aka0^Asa1^Aga1^Ama0; \ De1 = Cz^Cw; \ \ Cy = Amo0^Abo1^Ako0^Aso1^Ago0; \ Di0 = Du0^rol32(Cy, 1); \ Cw = Amo1^Abo0^Ako1^Aso0^Ago1; \ Di1 = Du1^Cw; \ \ Du0 = Cw^rol32(Cz, 1); \ Du1 = Cy^Cx; #define KeccakAtoD_round2() \ Cx = Aku1^Agu0^Abu1^Asu1^Amu1; \ Du1 = Ame0^Ake0^Age0^Abe0^Ase1; \ Da0 = Cx^rol32(Du1, 1); \ Cz = Aku0^Agu1^Abu0^Asu0^Amu0; \ Du0 = 
Ame1^Ake1^Age1^Abe1^Ase0; \ Da1 = Cz^Du0; \ \ Cw = Agi1^Abi1^Asi1^Ami0^Aki1; \ Do0 = Cw^rol32(Cz, 1); \ Cy = Agi0^Abi0^Asi0^Ami1^Aki0; \ Do1 = Cy^Cx; \ \ Cx = Aba0^Asa1^Ama1^Aka1^Aga1; \ De0 = Cx^rol32(Cy, 1); \ Cz = Aba1^Asa0^Ama0^Aka0^Aga0; \ De1 = Cz^Cw; \ \ Cy = Aso0^Amo0^Ako1^Ago0^Abo0; \ Di0 = Du0^rol32(Cy, 1); \ Cw = Aso1^Amo1^Ako0^Ago1^Abo1; \ Di1 = Du1^Cw; \ \ Du0 = Cw^rol32(Cz, 1); \ Du1 = Cy^Cx; #define KeccakAtoD_round3() \ Cx = Amu1^Agu0^Asu1^Aku0^Abu0; \ Du1 = Ake0^Abe1^Ame1^Age0^Ase1; \ Da0 = Cx^rol32(Du1, 1); \ Cz = Amu0^Agu1^Asu0^Aku1^Abu1; \ Du0 = Ake1^Abe0^Ame0^Age1^Ase0; \ Da1 = Cz^Du0; \ \ Cw = Asi0^Aki0^Abi1^Ami1^Agi1; \ Do0 = Cw^rol32(Cz, 1); \ Cy = Asi1^Aki1^Abi0^Ami0^Agi0; \ Do1 = Cy^Cx; \ \ Cx = Aba0^Ama0^Aga1^Asa1^Aka0; \ De0 = Cx^rol32(Cy, 1); \ Cz = Aba1^Ama1^Aga0^Asa0^Aka1; \ De1 = Cz^Cw; \ \ Cy = Ago1^Aso0^Ako0^Abo0^Amo1; \ Di0 = Du0^rol32(Cy, 1); \ Cw = Ago0^Aso1^Ako1^Abo1^Amo0; \ Di1 = Du1^Cw; \ \ Du0 = Cw^rol32(Cz, 1); \ Du1 = Cy^Cx; static void keccak_f(struct KeccakContext *ctx) { uint32_t *state = ctx->u.state32; uint32_t Da0, De0, Di0, Do0, Du0; uint32_t Da1, De1, Di1, Do1, Du1; uint32_t Ca0, Ce0, Ci0, Co0, Cu0; uint32_t Cx, Cy, Cz, Cw; int i; #define Ba Ca0 #define Be Ce0 #define Bi Ci0 #define Bo Co0 #define Bu Cu0 #define Aba0 state[ 0] #define Aba1 state[ 1] #define Abe0 state[ 2] #define Abe1 state[ 3] #define Abi0 state[ 4] #define Abi1 state[ 5] #define Abo0 state[ 6] #define Abo1 state[ 7] #define Abu0 state[ 8] #define Abu1 state[ 9] #define Aga0 state[10] #define Aga1 state[11] #define Age0 state[12] #define Age1 state[13] #define Agi0 state[14] #define Agi1 state[15] #define Ago0 state[16] #define Ago1 state[17] #define Agu0 state[18] #define Agu1 state[19] #define Aka0 state[20] #define Aka1 state[21] #define Ake0 state[22] #define Ake1 state[23] #define Aki0 state[24] #define Aki1 state[25] #define Ako0 state[26] #define Ako1 state[27] #define Aku0 state[28] #define Aku1 state[29] #define Ama0 state[30] #define 
Ama1 state[31] #define Ame0 state[32] #define Ame1 state[33] #define Ami0 state[34] #define Ami1 state[35] #define Amo0 state[36] #define Amo1 state[37] #define Amu0 state[38] #define Amu1 state[39] #define Asa0 state[40] #define Asa1 state[41] #define Ase0 state[42] #define Ase1 state[43] #define Asi0 state[44] #define Asi1 state[45] #define Aso0 state[46] #define Aso1 state[47] #define Asu0 state[48] #define Asu1 state[49] for (i = 0; i < KECCAK_ROUNDS*2; i += 8) { /* Code for 4 rounds */ KeccakAtoD_round0(); Ba = (Aba0^Da0); Be = rol32((Age0^De0), 22); Bi = rol32((Aki1^Di1), 22); Bo = rol32((Amo1^Do1), 11); Bu = rol32((Asu0^Du0), 7); Aba0 = Ba ^((~Be)& Bi ); Aba0 ^= RoundConstants32[i+0]; Age0 = Be ^((~Bi)& Bo ); Aki1 = Bi ^((~Bo)& Bu ); Amo1 = Bo ^((~Bu)& Ba ); Asu0 = Bu ^((~Ba)& Be ); Ba = (Aba1^Da1); Be = rol32((Age1^De1), 22); Bi = rol32((Aki0^Di0), 21); Bo = rol32((Amo0^Do0), 10); Bu = rol32((Asu1^Du1), 7); Aba1 = Ba ^((~Be)& Bi ); Aba1 ^= RoundConstants32[i+1]; Age1 = Be ^((~Bi)& Bo ); Aki0 = Bi ^((~Bo)& Bu ); Amo0 = Bo ^((~Bu)& Ba ); Asu1 = Bu ^((~Ba)& Be ); Bi = rol32((Aka1^Da1), 2); Bo = rol32((Ame1^De1), 23); Bu = rol32((Asi1^Di1), 31); Ba = rol32((Abo0^Do0), 14); Be = rol32((Agu0^Du0), 10); Aka1 = Ba ^((~Be)& Bi ); Ame1 = Be ^((~Bi)& Bo ); Asi1 = Bi ^((~Bo)& Bu ); Abo0 = Bo ^((~Bu)& Ba ); Agu0 = Bu ^((~Ba)& Be ); Bi = rol32((Aka0^Da0), 1); Bo = rol32((Ame0^De0), 22); Bu = rol32((Asi0^Di0), 30); Ba = rol32((Abo1^Do1), 14); Be = rol32((Agu1^Du1), 10); Aka0 = Ba ^((~Be)& Bi ); Ame0 = Be ^((~Bi)& Bo ); Asi0 = Bi ^((~Bo)& Bu ); Abo1 = Bo ^((~Bu)& Ba ); Agu1 = Bu ^((~Ba)& Be ); Bu = rol32((Asa0^Da0), 9); Ba = rol32((Abe1^De1), 1); Be = rol32((Agi0^Di0), 3); Bi = rol32((Ako1^Do1), 13); Bo = rol32((Amu0^Du0), 4); Asa0 = Ba ^((~Be)& Bi ); Abe1 = Be ^((~Bi)& Bo ); Agi0 = Bi ^((~Bo)& Bu ); Ako1 = Bo ^((~Bu)& Ba ); Amu0 = Bu ^((~Ba)& Be ); Bu = rol32((Asa1^Da1), 9); Ba = (Abe0^De0); Be = rol32((Agi1^Di1), 3); Bi = rol32((Ako0^Do0), 12); Bo = rol32((Amu1^Du1), 4); 
Asa1 = Ba ^((~Be)& Bi ); Abe0 = Be ^((~Bi)& Bo ); Agi1 = Bi ^((~Bo)& Bu ); Ako0 = Bo ^((~Bu)& Ba ); Amu1 = Bu ^((~Ba)& Be ); Be = rol32((Aga0^Da0), 18); Bi = rol32((Ake0^De0), 5); Bo = rol32((Ami1^Di1), 8); Bu = rol32((Aso0^Do0), 28); Ba = rol32((Abu1^Du1), 14); Aga0 = Ba ^((~Be)& Bi ); Ake0 = Be ^((~Bi)& Bo ); Ami1 = Bi ^((~Bo)& Bu ); Aso0 = Bo ^((~Bu)& Ba ); Abu1 = Bu ^((~Ba)& Be ); Be = rol32((Aga1^Da1), 18); Bi = rol32((Ake1^De1), 5); Bo = rol32((Ami0^Di0), 7); Bu = rol32((Aso1^Do1), 28); Ba = rol32((Abu0^Du0), 13); Aga1 = Ba ^((~Be)& Bi ); Ake1 = Be ^((~Bi)& Bo ); Ami0 = Bi ^((~Bo)& Bu ); Aso1 = Bo ^((~Bu)& Ba ); Abu0 = Bu ^((~Ba)& Be ); Bo = rol32((Ama1^Da1), 21); Bu = rol32((Ase0^De0), 1); Ba = rol32((Abi0^Di0), 31); Be = rol32((Ago1^Do1), 28); Bi = rol32((Aku1^Du1), 20); Ama1 = Ba ^((~Be)& Bi ); Ase0 = Be ^((~Bi)& Bo ); Abi0 = Bi ^((~Bo)& Bu ); Ago1 = Bo ^((~Bu)& Ba ); Aku1 = Bu ^((~Ba)& Be ); Bo = rol32((Ama0^Da0), 20); Bu = rol32((Ase1^De1), 1); Ba = rol32((Abi1^Di1), 31); Be = rol32((Ago0^Do0), 27); Bi = rol32((Aku0^Du0), 19); Ama0 = Ba ^((~Be)& Bi ); Ase1 = Be ^((~Bi)& Bo ); Abi1 = Bi ^((~Bo)& Bu ); Ago0 = Bo ^((~Bu)& Ba ); Aku0 = Bu ^((~Ba)& Be ); KeccakAtoD_round1(); Ba = (Aba0^Da0); Be = rol32((Ame1^De0), 22); Bi = rol32((Agi1^Di1), 22); Bo = rol32((Aso1^Do1), 11); Bu = rol32((Aku1^Du0), 7); Aba0 = Ba ^((~Be)& Bi ); Aba0 ^= RoundConstants32[i+2]; Ame1 = Be ^((~Bi)& Bo ); Agi1 = Bi ^((~Bo)& Bu ); Aso1 = Bo ^((~Bu)& Ba ); Aku1 = Bu ^((~Ba)& Be ); Ba = (Aba1^Da1); Be = rol32((Ame0^De1), 22); Bi = rol32((Agi0^Di0), 21); Bo = rol32((Aso0^Do0), 10); Bu = rol32((Aku0^Du1), 7); Aba1 = Ba ^((~Be)& Bi ); Aba1 ^= RoundConstants32[i+3]; Ame0 = Be ^((~Bi)& Bo ); Agi0 = Bi ^((~Bo)& Bu ); Aso0 = Bo ^((~Bu)& Ba ); Aku0 = Bu ^((~Ba)& Be ); Bi = rol32((Asa1^Da1), 2); Bo = rol32((Ake1^De1), 23); Bu = rol32((Abi1^Di1), 31); Ba = rol32((Amo1^Do0), 14); Be = rol32((Agu0^Du0), 10); Asa1 = Ba ^((~Be)& Bi ); Ake1 = Be ^((~Bi)& Bo ); Abi1 = Bi ^((~Bo)& Bu ); Amo1 = Bo 
^((~Bu)& Ba ); Agu0 = Bu ^((~Ba)& Be ); Bi = rol32((Asa0^Da0), 1); Bo = rol32((Ake0^De0), 22); Bu = rol32((Abi0^Di0), 30); Ba = rol32((Amo0^Do1), 14); Be = rol32((Agu1^Du1), 10); Asa0 = Ba ^((~Be)& Bi ); Ake0 = Be ^((~Bi)& Bo ); Abi0 = Bi ^((~Bo)& Bu ); Amo0 = Bo ^((~Bu)& Ba ); Agu1 = Bu ^((~Ba)& Be ); Bu = rol32((Ama1^Da0), 9); Ba = rol32((Age1^De1), 1); Be = rol32((Asi1^Di0), 3); Bi = rol32((Ako0^Do1), 13); Bo = rol32((Abu1^Du0), 4); Ama1 = Ba ^((~Be)& Bi ); Age1 = Be ^((~Bi)& Bo ); Asi1 = Bi ^((~Bo)& Bu ); Ako0 = Bo ^((~Bu)& Ba ); Abu1 = Bu ^((~Ba)& Be ); Bu = rol32((Ama0^Da1), 9); Ba = (Age0^De0); Be = rol32((Asi0^Di1), 3); Bi = rol32((Ako1^Do0), 12); Bo = rol32((Abu0^Du1), 4); Ama0 = Ba ^((~Be)& Bi ); Age0 = Be ^((~Bi)& Bo ); Asi0 = Bi ^((~Bo)& Bu ); Ako1 = Bo ^((~Bu)& Ba ); Abu0 = Bu ^((~Ba)& Be ); Be = rol32((Aka1^Da0), 18); Bi = rol32((Abe1^De0), 5); Bo = rol32((Ami0^Di1), 8); Bu = rol32((Ago1^Do0), 28); Ba = rol32((Asu1^Du1), 14); Aka1 = Ba ^((~Be)& Bi ); Abe1 = Be ^((~Bi)& Bo ); Ami0 = Bi ^((~Bo)& Bu ); Ago1 = Bo ^((~Bu)& Ba ); Asu1 = Bu ^((~Ba)& Be ); Be = rol32((Aka0^Da1), 18); Bi = rol32((Abe0^De1), 5); Bo = rol32((Ami1^Di0), 7); Bu = rol32((Ago0^Do1), 28); Ba = rol32((Asu0^Du0), 13); Aka0 = Ba ^((~Be)& Bi ); Abe0 = Be ^((~Bi)& Bo ); Ami1 = Bi ^((~Bo)& Bu ); Ago0 = Bo ^((~Bu)& Ba ); Asu0 = Bu ^((~Ba)& Be ); Bo = rol32((Aga1^Da1), 21); Bu = rol32((Ase0^De0), 1); Ba = rol32((Aki1^Di0), 31); Be = rol32((Abo1^Do1), 28); Bi = rol32((Amu1^Du1), 20); Aga1 = Ba ^((~Be)& Bi ); Ase0 = Be ^((~Bi)& Bo ); Aki1 = Bi ^((~Bo)& Bu ); Abo1 = Bo ^((~Bu)& Ba ); Amu1 = Bu ^((~Ba)& Be ); Bo = rol32((Aga0^Da0), 20); Bu = rol32((Ase1^De1), 1); Ba = rol32((Aki0^Di1), 31); Be = rol32((Abo0^Do0), 27); Bi = rol32((Amu0^Du0), 19); Aga0 = Ba ^((~Be)& Bi ); Ase1 = Be ^((~Bi)& Bo ); Aki0 = Bi ^((~Bo)& Bu ); Abo0 = Bo ^((~Bu)& Ba ); Amu0 = Bu ^((~Ba)& Be ); KeccakAtoD_round2(); Ba = (Aba0^Da0); Be = rol32((Ake1^De0), 22); Bi = rol32((Asi0^Di1), 22); Bo = rol32((Ago0^Do1), 11); Bu = 
rol32((Amu1^Du0), 7); Aba0 = Ba ^((~Be)& Bi ); Aba0 ^= RoundConstants32[i+4]; Ake1 = Be ^((~Bi)& Bo ); Asi0 = Bi ^((~Bo)& Bu ); Ago0 = Bo ^((~Bu)& Ba ); Amu1 = Bu ^((~Ba)& Be ); Ba = (Aba1^Da1); Be = rol32((Ake0^De1), 22); Bi = rol32((Asi1^Di0), 21); Bo = rol32((Ago1^Do0), 10); Bu = rol32((Amu0^Du1), 7); Aba1 = Ba ^((~Be)& Bi ); Aba1 ^= RoundConstants32[i+5]; Ake0 = Be ^((~Bi)& Bo ); Asi1 = Bi ^((~Bo)& Bu ); Ago1 = Bo ^((~Bu)& Ba ); Amu0 = Bu ^((~Ba)& Be ); Bi = rol32((Ama0^Da1), 2); Bo = rol32((Abe0^De1), 23); Bu = rol32((Aki0^Di1), 31); Ba = rol32((Aso1^Do0), 14); Be = rol32((Agu0^Du0), 10); Ama0 = Ba ^((~Be)& Bi ); Abe0 = Be ^((~Bi)& Bo ); Aki0 = Bi ^((~Bo)& Bu ); Aso1 = Bo ^((~Bu)& Ba ); Agu0 = Bu ^((~Ba)& Be ); Bi = rol32((Ama1^Da0), 1); Bo = rol32((Abe1^De0), 22); Bu = rol32((Aki1^Di0), 30); Ba = rol32((Aso0^Do1), 14); Be = rol32((Agu1^Du1), 10); Ama1 = Ba ^((~Be)& Bi ); Abe1 = Be ^((~Bi)& Bo ); Aki1 = Bi ^((~Bo)& Bu ); Aso0 = Bo ^((~Bu)& Ba ); Agu1 = Bu ^((~Ba)& Be ); Bu = rol32((Aga1^Da0), 9); Ba = rol32((Ame0^De1), 1); Be = rol32((Abi1^Di0), 3); Bi = rol32((Ako1^Do1), 13); Bo = rol32((Asu1^Du0), 4); Aga1 = Ba ^((~Be)& Bi ); Ame0 = Be ^((~Bi)& Bo ); Abi1 = Bi ^((~Bo)& Bu ); Ako1 = Bo ^((~Bu)& Ba ); Asu1 = Bu ^((~Ba)& Be ); Bu = rol32((Aga0^Da1), 9); Ba = (Ame1^De0); Be = rol32((Abi0^Di1), 3); Bi = rol32((Ako0^Do0), 12); Bo = rol32((Asu0^Du1), 4); Aga0 = Ba ^((~Be)& Bi ); Ame1 = Be ^((~Bi)& Bo ); Abi0 = Bi ^((~Bo)& Bu ); Ako0 = Bo ^((~Bu)& Ba ); Asu0 = Bu ^((~Ba)& Be ); Be = rol32((Asa1^Da0), 18); Bi = rol32((Age1^De0), 5); Bo = rol32((Ami1^Di1), 8); Bu = rol32((Abo1^Do0), 28); Ba = rol32((Aku0^Du1), 14); Asa1 = Ba ^((~Be)& Bi ); Age1 = Be ^((~Bi)& Bo ); Ami1 = Bi ^((~Bo)& Bu ); Abo1 = Bo ^((~Bu)& Ba ); Aku0 = Bu ^((~Ba)& Be ); Be = rol32((Asa0^Da1), 18); Bi = rol32((Age0^De1), 5); Bo = rol32((Ami0^Di0), 7); Bu = rol32((Abo0^Do1), 28); Ba = rol32((Aku1^Du0), 13); Asa0 = Ba ^((~Be)& Bi ); Age0 = Be ^((~Bi)& Bo ); Ami0 = Bi ^((~Bo)& Bu ); Abo0 = Bo ^((~Bu)& Ba 
); Aku1 = Bu ^((~Ba)& Be ); Bo = rol32((Aka0^Da1), 21); Bu = rol32((Ase0^De0), 1); Ba = rol32((Agi1^Di0), 31); Be = rol32((Amo0^Do1), 28); Bi = rol32((Abu0^Du1), 20); Aka0 = Ba ^((~Be)& Bi ); Ase0 = Be ^((~Bi)& Bo ); Agi1 = Bi ^((~Bo)& Bu ); Amo0 = Bo ^((~Bu)& Ba ); Abu0 = Bu ^((~Ba)& Be ); Bo = rol32((Aka1^Da0), 20); Bu = rol32((Ase1^De1), 1); Ba = rol32((Agi0^Di1), 31); Be = rol32((Amo1^Do0), 27); Bi = rol32((Abu1^Du0), 19); Aka1 = Ba ^((~Be)& Bi ); Ase1 = Be ^((~Bi)& Bo ); Agi0 = Bi ^((~Bo)& Bu ); Amo1 = Bo ^((~Bu)& Ba ); Abu1 = Bu ^((~Ba)& Be ); KeccakAtoD_round3(); Ba = (Aba0^Da0); Be = rol32((Abe0^De0), 22); Bi = rol32((Abi0^Di1), 22); Bo = rol32((Abo0^Do1), 11); Bu = rol32((Abu0^Du0), 7); Aba0 = Ba ^((~Be)& Bi ); Aba0 ^= RoundConstants32[i+6]; Abe0 = Be ^((~Bi)& Bo ); Abi0 = Bi ^((~Bo)& Bu ); Abo0 = Bo ^((~Bu)& Ba ); Abu0 = Bu ^((~Ba)& Be ); Ba = (Aba1^Da1); Be = rol32((Abe1^De1), 22); Bi = rol32((Abi1^Di0), 21); Bo = rol32((Abo1^Do0), 10); Bu = rol32((Abu1^Du1), 7); Aba1 = Ba ^((~Be)& Bi ); Aba1 ^= RoundConstants32[i+7]; Abe1 = Be ^((~Bi)& Bo ); Abi1 = Bi ^((~Bo)& Bu ); Abo1 = Bo ^((~Bu)& Ba ); Abu1 = Bu ^((~Ba)& Be ); Bi = rol32((Aga0^Da1), 2); Bo = rol32((Age0^De1), 23); Bu = rol32((Agi0^Di1), 31); Ba = rol32((Ago0^Do0), 14); Be = rol32((Agu0^Du0), 10); Aga0 = Ba ^((~Be)& Bi ); Age0 = Be ^((~Bi)& Bo ); Agi0 = Bi ^((~Bo)& Bu ); Ago0 = Bo ^((~Bu)& Ba ); Agu0 = Bu ^((~Ba)& Be ); Bi = rol32((Aga1^Da0), 1); Bo = rol32((Age1^De0), 22); Bu = rol32((Agi1^Di0), 30); Ba = rol32((Ago1^Do1), 14); Be = rol32((Agu1^Du1), 10); Aga1 = Ba ^((~Be)& Bi ); Age1 = Be ^((~Bi)& Bo ); Agi1 = Bi ^((~Bo)& Bu ); Ago1 = Bo ^((~Bu)& Ba ); Agu1 = Bu ^((~Ba)& Be ); Bu = rol32((Aka0^Da0), 9); Ba = rol32((Ake0^De1), 1); Be = rol32((Aki0^Di0), 3); Bi = rol32((Ako0^Do1), 13); Bo = rol32((Aku0^Du0), 4); Aka0 = Ba ^((~Be)& Bi ); Ake0 = Be ^((~Bi)& Bo ); Aki0 = Bi ^((~Bo)& Bu ); Ako0 = Bo ^((~Bu)& Ba ); Aku0 = Bu ^((~Ba)& Be ); Bu = rol32((Aka1^Da1), 9); Ba = (Ake1^De0); Be = 
rol32((Aki1^Di1), 3); Bi = rol32((Ako1^Do0), 12); Bo = rol32((Aku1^Du1), 4); Aka1 = Ba ^((~Be)& Bi ); Ake1 = Be ^((~Bi)& Bo ); Aki1 = Bi ^((~Bo)& Bu ); Ako1 = Bo ^((~Bu)& Ba ); Aku1 = Bu ^((~Ba)& Be ); Be = rol32((Ama0^Da0), 18); Bi = rol32((Ame0^De0), 5); Bo = rol32((Ami0^Di1), 8); Bu = rol32((Amo0^Do0), 28); Ba = rol32((Amu0^Du1), 14); Ama0 = Ba ^((~Be)& Bi ); Ame0 = Be ^((~Bi)& Bo ); Ami0 = Bi ^((~Bo)& Bu ); Amo0 = Bo ^((~Bu)& Ba ); Amu0 = Bu ^((~Ba)& Be ); Be = rol32((Ama1^Da1), 18); Bi = rol32((Ame1^De1), 5); Bo = rol32((Ami1^Di0), 7); Bu = rol32((Amo1^Do1), 28); Ba = rol32((Amu1^Du0), 13); Ama1 = Ba ^((~Be)& Bi ); Ame1 = Be ^((~Bi)& Bo ); Ami1 = Bi ^((~Bo)& Bu ); Amo1 = Bo ^((~Bu)& Ba ); Amu1 = Bu ^((~Ba)& Be ); Bo = rol32((Asa0^Da1), 21); Bu = rol32((Ase0^De0), 1); Ba = rol32((Asi0^Di0), 31); Be = rol32((Aso0^Do1), 28); Bi = rol32((Asu0^Du1), 20); Asa0 = Ba ^((~Be)& Bi ); Ase0 = Be ^((~Bi)& Bo ); Asi0 = Bi ^((~Bo)& Bu ); Aso0 = Bo ^((~Bu)& Ba ); Asu0 = Bu ^((~Ba)& Be ); Bo = rol32((Asa1^Da0), 20); Bu = rol32((Ase1^De1), 1); Ba = rol32((Asi1^Di1), 31); Be = rol32((Aso1^Do0), 27); Bi = rol32((Asu1^Du0), 19); Asa1 = Ba ^((~Be)& Bi ); Ase1 = Be ^((~Bi)& Bo ); Asi1 = Bi ^((~Bo)& Bu ); Aso1 = Bo ^((~Bu)& Ba ); Asu1 = Bu ^((~Ba)& Be ); } } static void xor_lane(struct KeccakContext *ctx, int lane, uint64_t val) { uint32_t x0, x1, t; uint32_t *dst = ctx->u.state32 + lane*2; x0 = val; t = (x0 ^ (x0 >> 1)) & 0x22222222; x0 = x0 ^ t ^ (t << 1); t = (x0 ^ (x0 >> 2)) & 0x0C0C0C0C; x0 = x0 ^ t ^ (t << 2); t = (x0 ^ (x0 >> 4)) & 0x00F000F0; x0 = x0 ^ t ^ (t << 4); t = (x0 ^ (x0 >> 8)) & 0x0000FF00; x0 = x0 ^ t ^ (t << 8); x1 = val >> 32; t = (x1 ^ (x1 >> 1)) & 0x22222222; x1 = x1 ^ t ^ (t << 1); t = (x1 ^ (x1 >> 2)) & 0x0C0C0C0C; x1 = x1 ^ t ^ (t << 2); t = (x1 ^ (x1 >> 4)) & 0x00F000F0; x1 = x1 ^ t ^ (t << 4); t = (x1 ^ (x1 >> 8)) & 0x0000FF00; x1 = x1 ^ t ^ (t << 8); dst[0] ^= (x0 & 0x0000FFFF) | (x1 << 16); dst[1] ^= (x0 >> 16) | (x1 & 0xFFFF0000); } static void 
extract(uint8_t *dst, const struct KeccakContext *ctx, int laneCount)
{
	const uint32_t *src = ctx->u.state32;
	uint32_t t, x0, x1;

	/* for each lane: de-interleave two 32-bit words back into one
	 * little-endian 64-bit lane (reverse of xor_lane's shuffle) */
	while (laneCount--) {
		x0 = *src++;
		x1 = *src++;
		/* swap 16-bit halves between the two words */
		t = (x0 & 0x0000FFFF) | (x1 << 16);
		x1 = (x0 >> 16) | (x1 & 0xFFFF0000);
		x0 = t;
		/* perfect-unshuffle bit permutation, 8/4/2/1-bit steps */
		t = (x0 ^ (x0 >> 8)) & 0x0000FF00; x0 = x0 ^ t ^ (t << 8);
		t = (x0 ^ (x0 >> 4)) & 0x00F000F0; x0 = x0 ^ t ^ (t << 4);
		t = (x0 ^ (x0 >> 2)) & 0x0C0C0C0C; x0 = x0 ^ t ^ (t << 2);
		t = (x0 ^ (x0 >> 1)) & 0x22222222; x0 = x0 ^ t ^ (t << 1);
		t = (x1 ^ (x1 >> 8)) & 0x0000FF00; x1 = x1 ^ t ^ (t << 8);
		t = (x1 ^ (x1 >> 4)) & 0x00F000F0; x1 = x1 ^ t ^ (t << 4);
		t = (x1 ^ (x1 >> 2)) & 0x0C0C0C0C; x1 = x1 ^ t ^ (t << 2);
		t = (x1 ^ (x1 >> 1)) & 0x22222222; x1 = x1 ^ t ^ (t << 1);
		le32enc(dst + 0, x0);
		le32enc(dst + 4, x1);
		dst += 8;
	}
}

#endif /* KECCAK_32BIT */

/*
 * Common code - shared by the 64-bit and 32-bit backends above.
 */

/* XOR a single message byte into the state at absolute byte offset nbyte. */
static void xor_byte(struct KeccakContext *ctx, int nbyte, uint8_t val)
{
	int o = nbyte / 8;		/* lane index */
	int s = (nbyte % 8) * 8;	/* bit shift inside the lane */
	xor_lane(ctx, o, (uint64_t)(val) << s);
}

/*
 * Absorb len message bytes into the state starting at ctx->bytes,
 * advancing ctx->bytes.  Handles unaligned head/tail bytes one at a
 * time and full 8-byte lanes in the middle.
 */
static void add_bytes(struct KeccakContext *ctx, const uint8_t *p, unsigned int len)
{
	uint64_t w;
	unsigned int m = ctx->bytes % 8;

	/* partial word */
	if (m) {
		m = 8 - m;
		if (m > len)
			m = len;
		while (m--) {
			xor_byte(ctx, ctx->bytes++, *p++);
			len--;
		}
	}

	/* full words */
	while (len >= 8) {
		w = le64dec(p);
		xor_lane(ctx, ctx->bytes / 8, w);
		ctx->bytes += 8;
		p += 8;
		len -= 8;
	}

	/* partial word */
	while (len--)
		xor_byte(ctx, ctx->bytes++, *p++);
}

/*
 * Clear the whole context and configure it for one Keccak variant:
 * rbytes = rate (input block size), obytes = output length per final.
 */
static void reset(struct KeccakContext *ctx, int rbytes, int obytes)
{
	memset(ctx, 0, sizeof(struct KeccakContext));
	ctx->rbytes = rbytes;
	ctx->obytes = obytes;
}

/*
 * Public API - one init function per fixed-length variant.
 */

void keccak224_init(struct KeccakContext *ctx)
{
	reset(ctx, KECCAK224_BLOCK_SIZE, KECCAK224_DIGEST_LENGTH);
}

void keccak256_init(struct KeccakContext *ctx)
{
	reset(ctx, KECCAK256_BLOCK_SIZE, KECCAK256_DIGEST_LENGTH);
}

void keccak384_init(struct KeccakContext *ctx)
{
	reset(ctx, KECCAK384_BLOCK_SIZE, KECCAK384_DIGEST_LENGTH);
}

void keccak512_init(struct KeccakContext *ctx)
{ reset(ctx, KECCAK512_BLOCK_SIZE, KECCAK512_DIGEST_LENGTH); } void keccak_stream_init(struct KeccakContext *ctx) { reset(ctx, KECCAK_STREAM_BLOCK_SIZE, KECCAK_STREAM_DIGEST_LENGTH); } void keccak_update(struct KeccakContext *ctx, const void *data, unsigned int len) { unsigned int n; const uint8_t *ptr = data; while (len > 0) { n = ctx->rbytes - ctx->bytes; if (n > len) n = len; add_bytes(ctx, ptr, n); ptr += n; len -= n; if (ctx->bytes == ctx->rbytes) { keccak_f(ctx); ctx->bytes = 0; } } } void keccak_final(struct KeccakContext *ctx, uint8_t *dst) { if (!ctx->padded) { /* 2-bit padding, assumes bytes < rbytes */ xor_byte(ctx, ctx->bytes, 0x01); xor_byte(ctx, ctx->rbytes - 1, 0x80); ctx->padded = 1; } keccak_f(ctx); if (ctx->obytes == KECCAK224_DIGEST_LENGTH) { /* 224-bit result uses partial words */ uint8_t buf[KECCAK256_DIGEST_LENGTH]; extract(buf, ctx, KECCAK256_DIGEST_LENGTH / 8); memcpy(dst, buf, KECCAK224_DIGEST_LENGTH); memset(buf, 0, sizeof(buf)); } else { extract(dst, ctx, ctx->obytes / 8); } } /* * DigestInfo */ const struct DigestInfo *digest_KECCAK224(void) { static const struct DigestInfo info = { (DigestInitFunc *)keccak224_init, (DigestUpdateFunc *)keccak_update, (DigestFinalFunc *)keccak_final, sizeof(struct KeccakContext), KECCAK224_DIGEST_LENGTH, KECCAK224_BLOCK_SIZE }; return &info; } const struct DigestInfo *digest_KECCAK256(void) { static const struct DigestInfo info = { (DigestInitFunc *)keccak256_init, (DigestUpdateFunc *)keccak_update, (DigestFinalFunc *)keccak_final, sizeof(struct KeccakContext), KECCAK256_DIGEST_LENGTH, KECCAK256_BLOCK_SIZE }; return &info; } const struct DigestInfo *digest_KECCAK384(void) { static const struct DigestInfo info = { (DigestInitFunc *)keccak384_init, (DigestUpdateFunc *)keccak_update, (DigestFinalFunc *)keccak_final, sizeof(struct KeccakContext), KECCAK384_DIGEST_LENGTH, KECCAK384_BLOCK_SIZE }; return &info; } const struct DigestInfo *digest_KECCAK512(void) { static const struct DigestInfo info = { 
(DigestInitFunc *)keccak512_init, (DigestUpdateFunc *)keccak_update, (DigestFinalFunc *)keccak_final, sizeof(struct KeccakContext), KECCAK512_DIGEST_LENGTH, KECCAK512_BLOCK_SIZE }; return &info; } const struct DigestInfo *digest_KECCAK_STREAM(void) { static const struct DigestInfo info = { (DigestInitFunc *)keccak_stream_init, (DigestUpdateFunc *)keccak_update, (DigestFinalFunc *)keccak_final, sizeof(struct KeccakContext), KECCAK_STREAM_DIGEST_LENGTH, KECCAK_STREAM_BLOCK_SIZE }; return &info; } skytools-3.2.6/lib/usual/slab.c0000644000000000000000000001471712166266754013314 0ustar /* * Primitive slab allocator. * * Copyright (c) 2007-2009 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #ifndef USUAL_FAKE_SLAB /* * Store for pre-initialized objects of one type. */ struct Slab { struct List head; struct StatList freelist; struct StatList fraglist; char name[32]; unsigned final_size; unsigned total_count; slab_init_fn init_func; CxMem *cx; }; /* * Header for each slab. 
*/ struct SlabFrag { struct List head; }; /* keep track of all active slabs */ static STATLIST(slab_list); static void slab_list_append(struct Slab *slab) { #ifndef _REENTRANT statlist_append(&slab_list, &slab->head); #endif } static void slab_list_remove(struct Slab *slab) { #ifndef _REENTRANT statlist_remove(&slab_list, &slab->head); #endif } /* fill struct contents */ static void init_slab(struct Slab *slab, const char *name, unsigned obj_size, unsigned align, slab_init_fn init_func, CxMem *cx) { unsigned slen = strlen(name); list_init(&slab->head); statlist_init(&slab->freelist, name); statlist_init(&slab->fraglist, name); slab->total_count = 0; slab->init_func = init_func; slab->cx = cx; if (slen >= sizeof(slab->name)) slen = sizeof(slab->name) - 1; memcpy(slab->name, name, slen); slab->name[slen] = 0; /* don't allow too small align, as we want to put pointers into area */ if (align < sizeof(long)) align = 0; /* actual area for one object */ if (align == 0) slab->final_size = ALIGN(obj_size); else slab->final_size = CUSTOM_ALIGN(obj_size, align); /* allow small structs */ if (slab->final_size < sizeof(struct List)) slab->final_size = sizeof(struct List); slab_list_append(slab); } /* make new slab */ struct Slab *slab_create(const char *name, unsigned obj_size, unsigned align, slab_init_fn init_func, CxMem *cx) { struct Slab *slab; /* new slab object */ slab = cx_alloc0(cx, sizeof(*slab)); if (slab) init_slab(slab, name, obj_size, align, init_func, cx); return slab; } /* free all storage associated by slab */ void slab_destroy(struct Slab *slab) { struct List *item, *tmp; struct SlabFrag *frag; if (!slab) return; slab_list_remove(slab); statlist_for_each_safe(item, &slab->fraglist, tmp) { frag = container_of(item, struct SlabFrag, head); cx_free(slab->cx, frag); } cx_free(slab->cx, slab); } /* add new block of objects to slab */ static void grow(struct Slab *slab) { unsigned count, i, size; char *area; struct SlabFrag *frag; /* calc new slab size */ count = 
slab->total_count; if (count < 50) count = 16 * 1024 / slab->final_size; if (count < 50) count = 50; size = count * slab->final_size; /* allocate & init */ frag = cx_alloc0(slab->cx, size + sizeof(struct SlabFrag)); if (!frag) return; list_init(&frag->head); area = (char *)frag + sizeof(struct SlabFrag); /* init objects */ for (i = 0; i < count; i++) { void *obj = area + i * slab->final_size; struct List *head = (struct List *)obj; list_init(head); statlist_append(&slab->freelist, head); } /* register to slab */ slab->total_count += count; statlist_append(&slab->fraglist, &frag->head); } /* get free object from slab */ void *slab_alloc(struct Slab *slab) { struct List *item = statlist_pop(&slab->freelist); if (!item) { grow(slab); item = statlist_pop(&slab->freelist); } if (item) { if (slab->init_func) slab->init_func(item); else memset(item, 0, slab->final_size); } return item; } /* put object back to slab */ void slab_free(struct Slab *slab, void *obj) { struct List *item = obj; list_init(item); statlist_prepend(&slab->freelist, item); } /* total number of objects allocated from slab */ int slab_total_count(const struct Slab *slab) { return slab->total_count; } /* free objects in slab */ int slab_free_count(const struct Slab *slab) { return statlist_count(&slab->freelist); } /* number of objects in use */ int slab_active_count(const struct Slab *slab) { return slab_total_count(slab) - slab_free_count(slab); } static void run_slab_stats(struct Slab *slab, slab_stat_fn cb_func, void *cb_arg) { unsigned free = statlist_count(&slab->freelist); cb_func(cb_arg, slab->name, slab->final_size, free, slab->total_count); } /* call a function for all active slabs */ void slab_stats(slab_stat_fn cb_func, void *cb_arg) { struct Slab *slab; struct List *item; statlist_for_each(item, &slab_list) { slab = container_of(item, struct Slab, head); run_slab_stats(slab, cb_func, cb_arg); } } #else struct Slab { int size; struct StatList obj_list; slab_init_fn init_func; CxMem *cx; }; 
struct Slab *slab_create(const char *name, unsigned obj_size, unsigned align, slab_init_fn init_func, CxMem *cx) { struct Slab *s = cx_alloc(cx, sizeof(*s)); if (s) { s->size = obj_size; s->init_func = init_func; s->cx = cx; statlist_init(&s->obj_list, "obj_list"); } return s; } void slab_destroy(struct Slab *slab) { struct List *el, *tmp; statlist_for_each_safe(el, &slab->obj_list, tmp) { statlist_remove(&slab->obj_list, el); cx_free(slab->cx, el); } cx_free(slab->cx, slab); } void *slab_alloc(struct Slab *slab) { struct List *o; void *res; o = cx_alloc(slab->cx, sizeof(struct List) + slab->size); if (!o) return NULL; list_init(o); statlist_append(&slab->obj_list, o); res = (void *)(o + 1); if (slab->init_func) slab->init_func(res); return res; } void slab_free(struct Slab *slab, void *obj) { if (obj) { struct List *el = obj; statlist_remove(&slab->obj_list, el - 1); cx_free(slab->cx, el - 1); } } int slab_total_count(const struct Slab *slab) { return 0; } int slab_free_count(const struct Slab *slab) { return 0; } int slab_active_count(const struct Slab *slab) { return 0; } void slab_stats(slab_stat_fn cb_func, void *cb_arg) {} #endif skytools-3.2.6/lib/usual/pgutil.c0000644000000000000000000001141712166266754013671 0ustar /* * Some utility functions for Postgres. * * - Literal & ident quoting. 
* - Array parsing */ #include #include /* str -> E'str' */ bool pg_quote_literal(char *_dst, const char *_src, int dstlen) { char *dst = _dst; char *end = _dst + dstlen - 2; const char *src = _src; bool stdquote = true; if (dstlen < 3) return false; if (_src == NULL) { if (dstlen < 5) return false; memcpy(_dst, "NULL", 5); return true; } retry: *dst++ = '\''; while (*src && dst < end) { if (*src == '\'') *dst++ = '\''; else if (*src == '\\') { if (stdquote) goto retry_ext; *dst++ = '\\'; } *dst++ = *src++; } if (*src || dst > end) return false; *dst++ = '\''; *dst = 0; return true; retry_ext: /* string contains '\\', retry as E'' string */ dst = _dst; src = _src; *dst++ = 'E'; stdquote = false; goto retry; } static inline bool id_start(unsigned char c) { return (c >= 'a' && c <= 'z') || c == '_'; } static inline bool id_body(unsigned char c) { return id_start(c) || (c >= '0' && c <= '9'); } /* ident -> "ident" */ bool pg_quote_ident(char *_dst, const char *_src, int dstlen) { char *dst = _dst; char *end = _dst + dstlen - 1; const char *src = _src; if (dstlen < 1) return false; if (!id_start(*src)) goto needs_quoting; while (*src && dst < end) { if (!id_body(*src)) goto needs_quoting; *dst++ = *src++; } if (*src) return false; *dst = 0; if (!pg_is_reserved_word(_dst)) return true; needs_quoting: dst = _dst; src = _src; end = _dst + dstlen - 2; if (dstlen < 3) return false; *dst++ = '"'; while (*src && dst < end) { if (*src == '"') *dst++ = *src; *dst++ = *src++; } if (*src) return false; *dst++ = '"'; *dst = 0; return true; } /* schema.name -> "schema"."name" */ bool pg_quote_fqident(char *_dst, const char *_src, int dstlen) { const char *dot = strchr(_src, '.'); char scmbuf[128]; const char *scm; int scmlen; if (dot) { scmlen = dot - _src; if (scmlen >= (int)sizeof(scmbuf)) return false; memcpy(scmbuf, _src, scmlen); scmbuf[scmlen] = 0; scm = scmbuf; _src = dot + 1; } else { scm = "public"; } if (!pg_quote_ident(_dst, scm, dstlen)) return false; scmlen = 
strlen(_dst); _dst[scmlen] = '.'; _dst += scmlen + 1; dstlen -= scmlen + 1; if (!pg_quote_ident(_dst, _src, dstlen)) return false; return true; } /* * pgarray parsing */ static bool parse_value(struct StrList *arr, const char *val, const char *vend, CxMem *cx) { int len; const char *s; char *str, *p; unsigned c; while (val < vend && isspace(*val)) val++; while (vend > val && isspace(vend[-1])) vend--; if (val == vend) return false; s = val; len = vend - val; if (len == 4 && !strncasecmp(val, "null", len)) { return strlist_append_ref(arr, NULL); } p = str = cx_alloc(cx, len + 1); if (!str) return false; /* unquote & copy */ while (s < vend) { c = *s++; if (c == '"') { while (1) { c = *s++; if (c == '"') break; else if (c == '\\') *p++ = *s++; else *p++ = c; } } else if (c == '\\') { *p++ = *s++; } else *p++ = c; } *p++ = 0; if (!strlist_append_ref(arr, str)) { cx_free(cx, str); return false; } return true; } struct StrList *pg_parse_array(const char *pgarr, CxMem *cx) { const char *s = pgarr; struct StrList *lst; const char *val = NULL; unsigned c; /* skip dimension def "[x,y]={..}" */ if (*s == '[') { s = strchr(s, ']'); if (!s || s[1] != '=') return NULL; s += 2; } if (*s++ != '{') return NULL; lst = strlist_new(cx); if (!lst) return NULL; while (*s) { /* array end */ if (s[0] == '}') { if (s[1] != 0) { goto failed; } if (val) { if (!parse_value(lst, val, s, cx)) goto failed; } return lst; } /* cannot init earlier to support empty arrays */ if (!val) val = s; /* val done? */ if (*s == ',') { if (!parse_value(lst, val, s, cx)) goto failed; val = ++s; continue; } /* scan value */ c = *s++; if (c == '"') { while (1) { c = *s++; if (c == '"') break; else if (c == '\\') { if (!*s) goto failed; s++; } else if (!*s) goto failed; } } else if (c == '\\') { if (!*s) goto failed; s++; } } if (s[-1] != '}') goto failed; return lst; failed: strlist_free(lst); return NULL; } /* * Postgres keyword lookup. */ /* gperf tries ot inline a non-static function. 
*/ #undef inline #undef __inline #undef __attribute__ #define inline #define __inline #define __attribute__(x) #define long uintptr_t /* include gperf code */ const char *pg_keyword_lookup_real(const char *str, unsigned int len); #include bool pg_is_reserved_word(const char *str) { const char *kw = pg_keyword_lookup_real(str, strlen(str)); return kw != NULL; } skytools-3.2.6/lib/usual/cxalloc.h0000644000000000000000000000656212166266754014024 0ustar /* * Copyright (c) 2007-2009 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** * @file * * Context-based Memory Allocator. * * The idea is that each data structure is given a context to allocate from, * and it can create subcontext for that which can be specific allocation * pattern that matches the data structure. * * It is slightly more work to use than palloc (PostgreSQL) or talloc (Samba), * but it avoids the need to have big fully-featured framework that does * everything at once. * * Instead you have small task-specific allocators, and you can always fall * back to raw malloc if you want to valgrind the code. 
* * Potential variants: * - fully-featured pooled * - randomly failing * - logging * - locking * - guard signatures * - palloc / talloc like API */ #ifndef _USUAL_CXALLOC_H_ #define _USUAL_CXALLOC_H_ #include /** * Ops for allocator that takes context. * * NB! - they are not equivalent to cx_* API. The cx_* * functions do additional sanitizing. */ struct CxOps { /** * Allocate memory. * len will not be 0. */ void *(*c_alloc)(void *ctx, size_t len); /** * Resize existing allocation. * Both p and len will not be 0 */ void *(*c_realloc)(void *ctx, void *p, size_t len); /** * Free existing allocation. * p will not be 0 */ void (*c_free)(void *ctx, const void *p); /** * Release all memory in context. * This is not supported by all allocators. */ void (*c_destroy)(void *ctx); }; /** * Memory allocation context. */ struct CxMem { const struct CxOps *ops; void *ctx; }; /** Shortcut to const CxMem */ typedef const struct CxMem CxMem; /* * Basic operations on allocation context. */ /** * Allocate memory from context. * * Returns NULL if no memory or len == 0. */ void *cx_alloc(CxMem *cx, size_t len) _MALLOC; /** * Change existing allocation. * * If ptr is NULL it creates new allocation. * If len is 0 it frees the memory. */ void *cx_realloc(CxMem *cx, void *ptr, size_t len); /** * Free existing allocation. * * Does nothing if ptr is NULL. */ void cx_free(CxMem *cx, const void *ptr); /** * Release all memory allocated in context. * * Should be called only on contexts that support it. 
*/ void cx_destroy(CxMem *cx); /** Allocate and zero-fill memory */ void *cx_alloc0(CxMem *cx, size_t len) _MALLOC; /** Allocate and copy */ void *cx_memdup(CxMem *cx, const void *src, size_t len) _MALLOC; /** Allocate and copy string */ void *cx_strdup(CxMem *cx, const char *str) _MALLOC; /** Allocator that uses libc malloc/realloc/free */ extern CxMem cx_libc_allocator; /** Default allocator */ #ifndef USUAL_ALLOC #define USUAL_ALLOC (&cx_libc_allocator) #endif #endif skytools-3.2.6/lib/usual/pgsocket.c0000644000000000000000000002021412166266754014177 0ustar /* * Async Postgres connection. * * Copyright (c) 2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include #include #include #include #include #include #define MAX_QRY_ARGS 32 /* PgSocket.wait_type */ enum WType { W_NONE = 0, W_SOCK, W_TIME }; typedef void (*libev_cb)(int sock, short flags, void *arg); struct PgSocket { /* libevent state */ struct event ev; /* track wait state */ enum WType wait_type; /* EV_READ / EV_WRITE */ uint8_t wait_event; /* should connect after sleep */ bool reconnect; /* current connection */ PGconn *con; /* user handler */ pgs_handler_f handler_func; void *handler_arg; /* saved connect string */ const char *connstr; /* custom base or NULL */ struct event_base *base; /* temp place for resultset */ PGresult *last_result; usec_t connect_time; usec_t lifetime; }; /* report event to user callback */ static void send_event(struct PgSocket *db, enum PgEvent ev) { db->handler_func(db, db->handler_arg, ev, NULL); } /* wait socket event from libevent */ static void wait_event(struct PgSocket *db, short ev, libev_cb fn) { Assert(!db->wait_type); event_set(&db->ev, PQsocket(db->con), ev, fn, db); if (db->base) event_base_set(db->base, &db->ev); if (event_add(&db->ev, NULL) < 0) fatal_perror("event_add"); db->wait_type = W_SOCK; db->wait_event = ev; } /* wait timeout from libevent */ static void timeout_cb(int sock, short flags, void *arg) { struct PgSocket *db = arg; db->wait_type = W_NONE; if (db->reconnect) { db->reconnect = false; pgs_connect(db); } else { send_event(db, PGS_TIMEOUT); } } /* some error happened */ static void conn_error(struct PgSocket *db, enum PgEvent ev, const char *desc) { log_error("connection error: %s", desc); log_error("libpq: %s", PQerrorMessage(db->con)); send_event(db, ev); } /* report previously stored result */ static void report_last_result(struct PgSocket *db) { PGresult *res = db->last_result; if (!res) return; db->last_result = NULL; switch (PQresultStatus(res)) { default: log_error("%s: %s", PQdb(db->con), PQresultErrorMessage(res)); case PGRES_COMMAND_OK: case PGRES_TUPLES_OK: case PGRES_COPY_OUT: case 
PGRES_COPY_IN: db->handler_func(db, db->handler_arg, PGS_RESULT_OK, res); } PQclear(res); } /* * Called when select() told that conn is avail for reading. * * It should call postgres handlers and then change state if needed. * * Because the callback may want to close the connection when processing * last resultset, the PGresult handover is delayed one step. */ static void result_cb(int sock, short flags, void *arg) { struct PgSocket *db = arg; PGresult *res; db->wait_type = W_NONE; if (!PQconsumeInput(db->con)) { conn_error(db, PGS_RESULT_BAD, "PQconsumeInput"); return; } /* loop until PQgetResult returns NULL */ while (db->con) { /* incomplete result? */ if (PQisBusy(db->con)) { wait_event(db, EV_READ, result_cb); return; } /* next result */ res = PQgetResult(db->con); if (!res) break; report_last_result(db); db->last_result = res; } report_last_result(db); } static void flush(struct PgSocket *db); static void send_cb(int sock, short flags, void *arg) { struct PgSocket *db = arg; db->wait_type = W_NONE; flush(db); } /* handle connect states */ static void connect_cb(int sock, short flags, void *arg) { struct PgSocket *db = arg; PostgresPollingStatusType poll_res; db->wait_type = W_NONE; poll_res = PQconnectPoll(db->con); switch (poll_res) { case PGRES_POLLING_WRITING: wait_event(db, EV_WRITE, connect_cb); break; case PGRES_POLLING_READING: wait_event(db, EV_READ, connect_cb); break; case PGRES_POLLING_OK: db->connect_time = get_time_usec(); send_event(db, PGS_CONNECT_OK); break; default: conn_error(db, PGS_CONNECT_FAILED, "PQconnectPoll"); } } /* send query to server */ static void flush(struct PgSocket *db) { int res = PQflush(db->con); if (res > 0) { wait_event(db, EV_WRITE, send_cb); } else if (res == 0) { wait_event(db, EV_READ, result_cb); } else conn_error(db, PGS_RESULT_BAD, "PQflush"); } /* override default notice receiver */ static void custom_notice_receiver(void *arg, const PGresult *res) { /* do nothing */ } /* * Public API */ struct PgSocket 
*pgs_create(const char *connstr, pgs_handler_f fn, void *handler_arg) { struct PgSocket *db; db = calloc(1, sizeof(*db)); if (!db) return NULL; db->handler_func = fn; db->handler_arg = handler_arg; db->connstr = strdup(connstr); if (!db->connstr) { pgs_free(db); return NULL; } return db; } void pgs_set_event_base(struct PgSocket *pgs, struct event_base *base) { pgs->base = base; } void pgs_set_lifetime(struct PgSocket *pgs, double lifetime) { pgs->lifetime = USEC * lifetime; } void pgs_connect(struct PgSocket *db) { if (db->con) pgs_disconnect(db); db->con = PQconnectStart(db->connstr); if (db->con == NULL) { conn_error(db, PGS_CONNECT_FAILED, "PQconnectStart"); return; } if (PQstatus(db->con) == CONNECTION_BAD) { conn_error(db, PGS_CONNECT_FAILED, "PQconnectStart"); return; } PQsetNoticeReceiver(db->con, custom_notice_receiver, db); wait_event(db, EV_WRITE, connect_cb); } void pgs_disconnect(struct PgSocket *db) { if (db->wait_type) { event_del(&db->ev); db->wait_type = W_NONE; db->reconnect = false; } if (db->con) { PQfinish(db->con); db->con = NULL; } if (db->last_result) { PQclear(db->last_result); db->last_result = NULL; } } void pgs_free(struct PgSocket *db) { if (db) { pgs_disconnect(db); free(db->connstr); free(db); } } void pgs_sleep(struct PgSocket *db, double timeout) { struct timeval tv; Assert(!db->wait_type); if (db->con && db->lifetime) { usec_t now = get_time_usec(); if (db->connect_time + db->lifetime < now) { pgs_disconnect(db); db->reconnect = true; } } tv.tv_sec = timeout; tv.tv_usec = (timeout - tv.tv_sec) * USEC; evtimer_set(&db->ev, timeout_cb, db); if (db->base) event_base_set(db->base, &db->ev); if (evtimer_add(&db->ev, &tv) < 0) fatal_perror("event_add"); db->wait_type = W_TIME; } void pgs_reconnect(struct PgSocket *db, double timeout) { pgs_disconnect(db); pgs_sleep(db, timeout); db->reconnect = true; } void pgs_send_query_simple(struct PgSocket *db, const char *q) { int res; log_noise("%s", q); res = PQsendQuery(db->con, q); if (!res) { 
conn_error(db, PGS_RESULT_BAD, "PQsendQuery"); return; } flush(db); } void pgs_send_query_params(struct PgSocket *db, const char *q, int cnt, ...) { int i; va_list ap; const char * args[MAX_QRY_ARGS]; if (cnt < 0 || cnt > MAX_QRY_ARGS) { log_warning("bad query arg cnt"); send_event(db, PGS_RESULT_BAD); return; } va_start(ap, cnt); for (i = 0; i < cnt; i++) args[i] = va_arg(ap, char *); va_end(ap); pgs_send_query_params_list(db, q, cnt, args); } void pgs_send_query_params_list(struct PgSocket *db, const char *q, int cnt, const char *args[]) { int res; log_noise("%s", q); res = PQsendQueryParams(db->con, q, cnt, NULL, args, NULL, NULL, 0); if (!res) { conn_error(db, PGS_RESULT_BAD, "PQsendQueryParams"); return; } flush(db); } int pgs_connection_valid(struct PgSocket *db) { return (db->con != NULL); } PGconn *pgs_get_connection(struct PgSocket *db) { return db->con; } bool pgs_waiting_for_reply(struct PgSocket *db) { if (!db->con) return false; if (PQstatus(db->con) != CONNECTION_OK) return false; return (db->wait_type == W_SOCK) && (db->wait_event == EV_READ); } skytools-3.2.6/lib/usual/shlist.h0000644000000000000000000001034512166266754013677 0ustar /* * Circular list for shared mem. * * Copyright (c) 2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ /** * @file * * Circular list for shared mem. * * Instead of pointers, it uses offsets from list head. */ #ifndef _USUAL_SHLIST_H_ #define _USUAL_SHLIST_H_ #include /** List node/head. Uses offsets from head instead of direct pointers. */ struct SHList { /** Offset to next elem */ uintptr_t next; /** Offset from next elem */ uintptr_t prev; }; /* * Calculate offset relative to base. * * Instead of using some third pointer (eg. shmem start) as base, * we use list itself as base. This results in simpler APi * and also means that empty list appears as zero-filled. */ static inline uintptr_t _ptr2sh(const void *base, const void *ptr) { return (uintptr_t)((char *)ptr - (char *)base); } static inline void *_sh2ptr(const void *base, uintptr_t sh) { return (void *)((char *)base + sh); } /* * List operations. */ /** Initialize list head */ static inline void shlist_init(struct SHList *list) { list->next = _ptr2sh(list, list); list->prev = _ptr2sh(list, list); } /** Insert as last element */ static inline void shlist_append(struct SHList *list, struct SHList *item) { struct SHList *last = _sh2ptr(list, list->prev); item->next = _ptr2sh(list, list); item->prev = _ptr2sh(list, last); list->prev = _ptr2sh(list, item); last->next = _ptr2sh(list, item); } /** Insert as first element */ static inline void shlist_prepend(struct SHList *list, struct SHList *item) { struct SHList *first = _sh2ptr(list, list->next); item->next = _ptr2sh(list, first); item->prev = _ptr2sh(list, list); list->next = _ptr2sh(list, item); first->prev = _ptr2sh(list, item); } /** Remove an item */ static inline void shlist_remove(struct SHList *list, struct SHList *item) { struct SHList *next = _sh2ptr(list, item->next); struct SHList *prev = _sh2ptr(list, item->prev); prev->next = item->next; next->prev = item->prev; item->next = item->prev = 0; /* _ptr2sh(list, item) does not make sense here; */ } /** No elements? 
*/ static inline bool shlist_empty(const struct SHList *list) { return list->next == list->prev; } static inline struct SHList *shlist_next(const struct SHList *list, const struct SHList *elem) { return _sh2ptr(list, elem->next); } static inline struct SHList *shlist_prev(const struct SHList *list, const struct SHList *elem) { return _sh2ptr(list, elem->prev); } /** Return first elem */ static inline struct SHList *shlist_first(const struct SHList *list) { if (shlist_empty(list)) return NULL; return _sh2ptr(list, list->next); } /** Return last elem */ static inline struct SHList *shlist_last(const struct SHList *list) { if (shlist_empty(list)) return NULL; return _sh2ptr(list, list->prev); } /** Remove first elem */ static inline struct SHList *shlist_pop(struct SHList *list) { struct SHList *item = shlist_first(list); if (item) shlist_remove(list, item); return item; } /** Remove and return specific type of elem */ #define shlist_pop_type(list, type, field) ( \ shlist_empty(list) ? NULL : container_of(shlist_pop(list), type, field)) /** Loop over list */ #define shlist_for_each(item, list) \ for ((item) = _sh2ptr((list), (list)->next); \ (item) != (list); \ (item) = _sh2ptr((list), (item)->next)) /** Loop over list and allow removing item */ #define shlist_for_each_safe(item, list, tmp) \ for ((item) = _sh2ptr((list), (list)->next), \ (tmp) = _sh2ptr((list), (item)->next); \ (item) != (list); \ (item) = (tmp), (tmp) = _sh2ptr((list), (tmp)->next)) #endif skytools-3.2.6/lib/usual/signal.c0000644000000000000000000000545712166266754013651 0ustar /* * Signal compat. * * Copyright (c) 2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. 
* * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include /* * alarm() for win32 */ #ifdef WIN32 struct AlarmCtx { struct sigaction sa; HANDLE event; HANDLE thread; int secs; }; static volatile struct AlarmCtx actx; static DWORD WINAPI w32_alarm_thread(LPVOID arg) { DWORD wres; unsigned msecs; loop: if (actx.secs > 0) { msecs = actx.secs * 1000; } else { msecs = INFINITE; } wres = WaitForSingleObject(actx.event, msecs); if (wres == WAIT_OBJECT_0) { goto loop; } else if (wres == WAIT_TIMEOUT) { actx.secs = 0; if (actx.sa.sa_handler) actx.sa.sa_handler(SIGALRM); goto loop; } else { Sleep(1000); goto loop; } return 0; } unsigned int alarm(unsigned int secs) { actx.secs = secs; /* create event */ if (!actx.event) { actx.event = CreateEvent(NULL, FALSE, FALSE, NULL); if (!actx.event) return 0; } /* create or notify thread */ if (!actx.thread) { actx.thread = CreateThread(NULL, 0, w32_alarm_thread, NULL, 0, NULL); } else { SetEvent(actx.event); } return 0; } #endif #ifndef HAVE_SIGACTION int sigaction(int sig, const struct sigaction *sa, struct sigaction *old) { #ifdef WIN32 if (sig == SIGALRM) { if (old) *old = actx.sa; if (sa) actx.sa = *sa; else actx.sa.sa_handler = NULL; return 0; } #endif old->sa_handler = signal(sig, sa->sa_handler); if (old->sa_handler == SIG_ERR) return -1; return 0; } #endif #ifdef WIN32 /* Only sig=0 is supported, to detect if process is running (ESRCH->not) */ int kill(int pid, int sig) { HANDLE hProcess; DWORD exitCode; int ret = 0; /* handle only sig == 0 */ if (sig != 0) { errno = EINVAL; 
return -1; } hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE, pid); if (hProcess == NULL) { if (GetLastError() == ERROR_INVALID_PARAMETER) ret = ESRCH; else ret = EPERM; } else { /* OpenProcess may succed for exited processes */ if (GetExitCodeProcess(hProcess, &exitCode)) { if (exitCode != STILL_ACTIVE) ret = ESRCH; } CloseHandle(hProcess); } if (ret) { errno = ret; return -1; } else return 0; } #endif skytools-3.2.6/lib/usual/fnmatch.c0000644000000000000000000001372212166266754014006 0ustar /* * fnmatch.c * * Copyright (c) 2012 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* * Differences from POSIX: * - ^ can be used in place of ! * - \ is escape in bracket expression, unless FNM_NOESCAPE is given. 
* - FNM_CASEFOLD * - FNM_LEADING_DIR */ #include #include #include #ifdef NEED_USUAL_FNMATCH /* compare chars with case folding */ static inline bool cmp_fold(wchar_t c1, wchar_t c2, int flags) { if (c1 == c2) return true; if (flags & FNM_CASEFOLD) { if (iswupper(c1) && iswlower(c2)) return c1 == (wchar_t)towupper(c2); else if (iswlower(c1) && iswupper(c2)) return c1 == (wchar_t)towlower(c2); } return false; } /* compare char to range with case folding */ static inline bool range_fold(wchar_t c, wchar_t r1, wchar_t r2, int flags) { if (c >= r1 && c <= r2) return true; if (flags & FNM_CASEFOLD) { /* convert only if it makes sense */ if (iswupper(c) && iswlower(r1) && iswlower(r2)) { c = towlower(c); if (c >= r1 && c <= r2) return true; } else if (iswlower(c) && iswupper(r1) && iswupper(r2)) { c = towupper(c); if (c >= r1 && c <= r2) return true; } } return false; } /* match bracket expression */ static const wchar_t *match_class(const wchar_t *pat, wchar_t c, int flags) { const wchar_t *p = pat; const wchar_t *start; bool neg = false; bool match = false; bool fallback_ok = true; const wchar_t *n1, *n2; wctype_t wct; /* negation */ if (*p == '!' || *p == '^') { neg = true; p++; } start = p; loop: /* named class, equivalence class or collating symbol */ if (p[0] == '[' && (p[1] == ':' || p[1] == '.' || p[1] == '=')) { n1 = p + 2; n2 = wcschr(n1, p[1]); if (!n2 || n2[1] != ']') goto parse_fail; if (p[1] != ':') return NULL; p = n2 + 2; wct = wctype_wcsn(n1, n2-n1); if (wct == (wctype_t)0) return NULL; if (iswctype(c, wct)) match = true; fallback_ok = false; /* skip rest */ goto loop; } parse_fail: /* unexpected pattern end */ if (p[0] == '\0') { /* only open bracket exists, take it as literal */ if (fallback_ok && c == '[') return pat - 1; return NULL; } /* closing bracket */ if (p[0] == ']' && p != start) return (match ^ neg) ? 
p : NULL; /* escape next char */ if (p[0] == '\\' && !(flags & FNM_NOESCAPE)) { if (p[1] == '\0') return NULL; p++; } /* its either simple range or char */ if (p[1] == '-' && p[2] != ']' && p[2] != '\0') { wchar_t r1 = p[0]; wchar_t r2 = p[2]; if (r2 == '\\' && !(flags & FNM_NOESCAPE)) { p++; r2 = p[2]; if (r2 == '\0') return NULL; } if (range_fold(c, r1, r2, flags)) match = true; p += 3; } else { if (cmp_fold(c, p[0], flags)) match = true; p++; } goto loop; } /* * FNM_PATHNAME disallows wildcard match for '/', * FNM_PERIOD disallows wildcard match for leading '.', * check for string end also. */ static bool disallow_wildcard(const wchar_t *s, const wchar_t *str, int flags) { if (*s == '\0') return true; if (*s == '/') return (flags & FNM_PATHNAME); if (*s == '.' && (flags & FNM_PERIOD)) { if (s == str) return true; if (s[-1] == '/' && (flags & FNM_PATHNAME)) return true; } return false; } /* * Non-recursive fnmatch(), based on globmatch() by */ static int wfnmatch(const wchar_t *pat, const wchar_t *str, int flags) { const wchar_t *p = pat; const wchar_t *s = str; const wchar_t *retry_p = NULL; const wchar_t *skip_s = NULL; loop: switch (*p) { case '*': /* match any number of chars from this position on */ retry_p = p + 1; skip_s = s; /* dot after '*' must not match leading dot */ if (p[1] == '.' 
&& disallow_wildcard(s, str, flags)) return FNM_NOMATCH; break; case '?': /* match any char */ if (disallow_wildcard(s, str, flags)) goto nomatch_retry; s++; break; case '[': /* match character class */ if (disallow_wildcard(s, str, flags)) goto nomatch_retry; p = match_class(p + 1, *s, flags); if (p == NULL) goto nomatch_retry; s++; break; case '\\': /* escape next char */ if (!(flags & FNM_NOESCAPE)) { p++; if (*p == '\0') return FNM_NOMATCH; } default: /* match single char */ if (*s == '/' && *p == '\0' && (flags & FNM_LEADING_DIR)) return 0; if (!cmp_fold(*p, *s, flags)) goto nomatch_retry; if (*s == '\0') return 0; s++; } p++; goto loop; nomatch_retry: /* eat chars with '*', if possible */ if (retry_p == NULL || *s == '\0') return FNM_NOMATCH; s = skip_s++; p = retry_p; if (*s == '\0') return (*p == '\0') ? 0 : FNM_NOMATCH; if (disallow_wildcard(s, str, flags)) return FNM_NOMATCH; s++; goto loop; } /* * Convert locale-specific encoding to wchar_t string */ int fnmatch(const char *pat, const char *str, int flags) { const wchar_t *wpat, *wstr; wchar_t pbuf[128]; wchar_t sbuf[128]; int plen = strlen(pat); int slen = strlen(str); int res; /* convert encoding */ wpat = mbstr_decode(pat, plen, NULL, pbuf, sizeof(pbuf) / sizeof(wchar_t), false); if (!wpat) return (errno == EILSEQ) ? FNM_NOMATCH : -1; wstr = mbstr_decode(str, slen, NULL, sbuf, sizeof(sbuf) / sizeof(wchar_t), true); if (!wstr) return -1; /* run actual fnmatch */ res = wfnmatch(wpat, wstr, flags); /* free buffers */ if (wstr != sbuf) free(wstr); if (wpat != pbuf) free(wpat); return res; } #endif skytools-3.2.6/lib/usual/endian.h0000644000000000000000000001075612166266754013635 0ustar /* * Copyright (c) 2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. 
* * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** * @file * * Endianess conversion macros. */ #ifndef _USUAL_ENDIAN_H_ #define _USUAL_ENDIAN_H_ #include #ifdef HAVE_ENDIAN_H #include #endif #ifdef HAVE_SYS_ENDIAN_H #include #endif #ifdef HAVE_BYTESWAP_H #include #endif #include /** * @name Always swap. * @{ */ #ifndef bswap16 #ifdef bswap_16 #define bswap16(x) bswap_16(x) #else static inline uint16_t bswap16(uint16_t x) { return (x << 8) | (x >> 8); } #endif #endif #ifndef bswap32 #ifdef bswap_32 #define bswap32(x) bswap_32(x) #else static inline uint32_t bswap32(uint32_t x) { #if defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3))) return __builtin_bswap32(x); #else x = ((x << 8) & 0xFF00FF00) | ((x >> 8) & 0x00FF00FF); return (x << 16) | (x >> 16); #endif } #endif #endif #ifndef bswap64 #ifdef bswap_64 #define bswap64(x) bswap_64(x) #else static inline uint64_t bswap64(uint64_t x) { return ((uint64_t)bswap32(x) << 32) | bswap32(x >> 32); } #endif #endif /* @} */ /** * @name Host <-> LE/BE * @{ */ /* Ignore OS defines, as they may define only some subset of functions */ #undef htobe16 #undef htobe32 #undef htobe64 #undef htole16 #undef htole32 #undef htole64 #undef be16toh #undef be32toh #undef be64toh #undef le16toh #undef le32toh #undef le64toh #ifdef WORDS_BIGENDIAN #define htobe16(x) ((uint16_t)(x)) #define htobe32(x) ((uint32_t)(x)) #define htobe64(x) ((uint64_t)(x)) #define htole16(x) bswap16(x) #define htole32(x) bswap32(x) #define htole64(x) bswap64(x) #define be16toh(x) 
((uint16_t)(x)) #define be32toh(x) ((uint32_t)(x)) #define be64toh(x) ((uint64_t)(x)) #define le16toh(x) bswap16(x) #define le32toh(x) bswap32(x) #define le64toh(x) bswap64(x) #else #define htobe16(x) bswap16(x) #define htobe32(x) bswap32(x) #define htobe64(x) bswap64(x) #define htole16(x) ((uint16_t)(x)) #define htole32(x) ((uint32_t)(x)) #define htole64(x) ((uint64_t)(x)) #define be16toh(x) bswap16(x) #define be32toh(x) bswap32(x) #define be64toh(x) bswap64(x) #define le16toh(x) ((uint16_t)(x)) #define le32toh(x) ((uint32_t)(x)) #define le64toh(x) ((uint64_t)(x)) #endif /* @} */ #ifndef HAVE_ENCDEC_FUNCS /** * @name Read LE/BE values from memory and convert to host format * @{ */ static inline uint16_t be16dec(const void *p) { uint16_t tmp; memcpy(&tmp, p, sizeof(tmp)); return htobe16(tmp); } static inline uint32_t be32dec(const void *p) { uint32_t tmp; memcpy(&tmp, p, sizeof(tmp)); return htobe32(tmp); } static inline uint64_t be64dec(const void *p) { uint64_t tmp; memcpy(&tmp, p, sizeof(tmp)); return htobe64(tmp); } static inline uint16_t le16dec(const void *p) { uint16_t tmp; memcpy(&tmp, p, sizeof(tmp)); return htole16(tmp); } static inline uint32_t le32dec(const void *p) { uint32_t tmp; memcpy(&tmp, p, sizeof(tmp)); return htole32(tmp); } static inline uint64_t le64dec(const void *p) { uint64_t tmp; memcpy(&tmp, p, sizeof(tmp)); return htole64(tmp); } /* @} */ /** * @name Convert host value to LE/BE and write to memory * @{ */ static inline void be16enc(void *p, uint16_t x) { uint16_t tmp = htobe16(x); memcpy(p, &tmp, sizeof(tmp)); } static inline void be32enc(void *p, uint32_t x) { uint32_t tmp = htobe32(x); memcpy(p, &tmp, sizeof(tmp)); } static inline void be64enc(void *p, uint64_t x) { uint64_t tmp = htobe64(x); memcpy(p, &tmp, sizeof(tmp)); } static inline void le16enc(void *p, uint16_t x) { uint16_t tmp = htole16(x); memcpy(p, &tmp, sizeof(tmp)); } static inline void le32enc(void *p, uint32_t x) { uint32_t tmp = htole32(x); memcpy(p, &tmp, 
sizeof(tmp)); } static inline void le64enc(void *p, uint64_t x) { uint64_t tmp = htole64(x); memcpy(p, &tmp, sizeof(tmp)); } /* @} */ #endif /* !HAVE_ENCDEC_FUNCS */ #endif /* _USUAL_ENDIAN_H_ */ skytools-3.2.6/lib/usual/time.c0000644000000000000000000001020212166266754013312 0ustar /* * Common time functions. * * Copyright (c) 2007-2009 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include #include char *format_time_ms(usec_t time, char *dst, unsigned dstlen) { struct tm *tm, tmbuf; struct timeval tv; time_t sec; if (!time) { gettimeofday(&tv, NULL); } else { tv.tv_sec = time / USEC; tv.tv_usec = time % USEC; } sec = tv.tv_sec; tm = localtime_r(&sec, &tmbuf); snprintf(dst, dstlen, "%04d-%02d-%02d %02d:%02d:%02d.%03d", tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec, (int)(tv.tv_usec / 1000)); return dst; } char *format_time_s(usec_t time, char *dst, unsigned dstlen) { time_t s; struct tm tbuf, *tm; if (!time) { struct timeval tv; gettimeofday(&tv, NULL); s = tv.tv_sec; } else { s = time / USEC; } tm = localtime_r(&s, &tbuf); snprintf(dst, dstlen, "%04d-%02d-%02d %02d:%02d:%02d", tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec); return dst; } /* read current time */ usec_t get_time_usec(void) { struct timeval tv; gettimeofday(&tv, NULL); return (usec_t)tv.tv_sec * USEC + tv.tv_usec; } static usec_t _time_cache; /* read cached time */ usec_t get_cached_time(void) { if (!_time_cache) _time_cache = get_time_usec(); return _time_cache; } /* forget cached time, let next read fill it */ void reset_time_cache(void) { _time_cache = 0; } /* * win32 compat */ #ifdef WIN32 /* unix epoch (1970) in seconds from windows epoch (1601) */ #define UNIX_EPOCH 11644473600LL /* 1 sec in 100 nsec units */ #define FT_SEC 10000000LL static void ft2tv(FILETIME *src, struct timeval *dst, bool use_epoch) { ULARGE_INTEGER tmp; tmp.LowPart = src->dwLowDateTime; tmp.HighPart = src->dwHighDateTime; dst->tv_sec = (tmp.QuadPart / FT_SEC) - (use_epoch ? 
UNIX_EPOCH : 0); dst->tv_usec = (tmp.QuadPart % FT_SEC) / 10; } #ifndef HAVE_GETTIMEOFDAY int gettimeofday(struct timeval * tp, void * tzp) { FILETIME file_time; SYSTEMTIME system_time; /* read UTC timestamp */ GetSystemTime(&system_time); SystemTimeToFileTime(&system_time, &file_time); /* convert to timeval */ ft2tv(&file_time, tp, true); return 0; } #endif /* !HAVE_GETTIMEOFDAY */ #ifndef HAVE_LOCALTIME_R struct tm *localtime_r(const time_t *tp, struct tm *dst) { ULARGE_INTEGER utc; FILETIME ft_utc; SYSTEMTIME st_utc, st_local; /* convert time_t to FILETIME */ utc.QuadPart = (*tp + UNIX_EPOCH) * FT_SEC; ft_utc.dwLowDateTime = utc.LowPart; ft_utc.dwHighDateTime = utc.HighPart; /* split to parts and get local time */ if (!FileTimeToSystemTime(&ft_utc, &st_utc)) return NULL; if (!SystemTimeToTzSpecificLocalTime(NULL, &st_utc, &st_local)) return NULL; /* fill struct tm */ dst->tm_sec = st_local.wSecond; dst->tm_min = st_local.wMinute; dst->tm_hour = st_local.wHour; dst->tm_mday = st_local.wDay; dst->tm_mon = st_local.wMonth - 1; dst->tm_year = st_local.wYear - 1900; dst->tm_wday = st_local.wDayOfWeek; dst->tm_yday = 0; dst->tm_isdst = -1; return dst; } #endif /* !HAVE_LOCALTIME_R */ #ifndef HAVE_GETRUSAGE int getrusage(int who, struct rusage *dst) { FILETIME tcreate, texit, tkern, tuser; if (who != RUSAGE_SELF) { errno = EINVAL; return -1; } if (!GetProcessTimes(GetCurrentProcess(), &tcreate, &texit, &tkern, &tuser)) return -1; ft2tv(&tuser, &dst->ru_utime, false); ft2tv(&tkern, &dst->ru_stime, false); return 0; } #endif /* !HAVE_GETRUSAGE */ #endif /* WIN32 */ skytools-3.2.6/lib/usual/hashtab-impl.h0000644000000000000000000001353112166266754014742 0ustar /* * Copyright (c) 2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. 
* * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** @file * Simple customizable hashtable implementation. * * - Fixed-size hash table, open-addressed * - Extended by linking several together * - Resizable by copying. * - Can be lockless in multi-reader, one-writer situation if * mempory barrier macros are defined. This also requires that * HashItem must not be split across cachelines. */ #include #include #ifndef HTAB_KEY_T /** Overridable type for key */ #define HTAB_KEY_T unsigned long #endif #ifndef HTAB_VAL_T /** Overridable type for value */ #define HTAB_VAL_T void * #endif #ifndef HTAB_RMB #define HTAB_RMB #endif #ifndef HTAB_WMB #define HTAB_WMB #endif /** Typedef for key */ typedef HTAB_KEY_T htab_key_t; /** Typedef for value */ typedef HTAB_VAL_T htab_val_t; #ifndef HTAB_ITEM #define HTAB_ITEM /** HashTab slot */ struct HashItem { htab_key_t key; htab_val_t value; }; #endif /** Signature for comparision function */ typedef bool (*hash_cmp_fn)(const htab_val_t curval, const void *arg); #ifndef HTAB_MAX_FILL /** Max fill percentage */ #define HTAB_MAX_FILL 75 #endif #define MASK(h) ((h)->size - 1) #define CALC_POS(h, key) ((key) & MASK(h)) #define NEXT_POS(h, pos) (((pos) * 5 + 1) & MASK(h)) #define MAX_USED(h) ((h)->size * HTAB_MAX_FILL / 100) /** Single HashTab segment */ struct HashTab { struct HashTab *next; hash_cmp_fn cmp_fn; CxMem *ca; unsigned size; unsigned used; struct HashItem tab[FLEX_ARRAY]; }; /** Initialize HashTab */ static struct HashTab *hashtab_create(unsigned size, hash_cmp_fn cmp_fn, CxMem 
*ca) { struct HashTab *h; unsigned len = size * sizeof(struct HashItem) + offsetof(struct HashTab, tab); h = cx_alloc0(ca, len); if (h) { h->size = size; h->cmp_fn = cmp_fn; h->ca = ca; } return h; } /** Free HashTab */ static void hashtab_destroy(struct HashTab *h) { struct HashTab *tmp; while (h) { tmp = h->next; cx_free(h->ca, h); h = tmp; } } /** Element lookup, optionally inserting new slot */ static htab_val_t *hashtab_lookup(struct HashTab *h, htab_key_t key, bool do_insert, const void *arg) { unsigned pos; struct HashItem *i; loop: /* find key, starting from pos */ pos = CALC_POS(h, key); while (h->tab[pos].value) { i = &h->tab[pos]; HTAB_RMB; if (i->key == key) { if (arg && h->cmp_fn(i->value, arg)) return &i->value; } pos = NEXT_POS(h, pos); } /* not found in this one, check chained tables */ if (h->next) { h = h->next; goto loop; } /* just lookup? */ if (!do_insert) return NULL; /* insert */ if (h->used >= MAX_USED(h)) { struct HashTab *tmp; tmp = hashtab_create(h->size, h->cmp_fn, h->ca); if (!tmp) return NULL; h->next = tmp; h = tmp; pos = CALC_POS(h, key); } h->used++; h->tab[pos].key = key; HTAB_WMB; return &h->tab[pos].value; } /* if proper pos is between src and dst, cannot move */ static bool _hashtab_slot_can_move(struct HashTab *h, unsigned dstpos, unsigned srcpos) { htab_key_t key = h->tab[srcpos].key; unsigned pos, kpos = CALC_POS(h, key); if (kpos == srcpos) return false; if (kpos == dstpos) return true; for (pos = NEXT_POS(h, dstpos); pos != srcpos; pos = NEXT_POS(h, pos)) { if (pos == kpos) return false; } return true; } /** Delete an element */ static void hashtab_delete(struct HashTab *h, htab_key_t key, void *arg) { htab_val_t *vptr; struct HashItem *hd; unsigned pos, dstpos; /* find it */ vptr = hashtab_lookup(h, key, false, arg); if (!vptr) return; /* find right tab */ hd = container_of(vptr, struct HashItem, value); while (h && ((hd < h->tab) || (hd >= h->tab + h->size))) h = h->next; /* calculate index */ dstpos = hd - h->tab; loop: 
/* move slot */ for (pos = NEXT_POS(h, dstpos); h->tab[pos].value; pos = NEXT_POS(h, pos)) { if (_hashtab_slot_can_move(h, dstpos, pos)) { h->tab[dstpos].key = h->tab[pos].key; h->tab[dstpos].value = h->tab[pos].value; dstpos = pos; goto loop; } } h->tab[dstpos].value = 0; HTAB_WMB; h->tab[dstpos].key = 0; h->used--; } /** Count elements and fragments */ static void hashtab_stats(struct HashTab *h, unsigned *nitem_p, unsigned *ntab_p) { unsigned n = 0, l = 0; while (h) { l++; n += h->used; h = h->next; } *nitem_p = n; *ntab_p = l; } /** Copy elements to new hashtab, perhaps with different size */ static struct HashTab *hashtab_copy(struct HashTab *h_old, unsigned newsize) { struct HashTab *h_new; unsigned i; h_new = hashtab_create(newsize, h_old->cmp_fn, h_old->ca); for (; h_old; h_old = h_old->next) { for (i = 0; i < h_old->size; i++) { struct HashItem *s = &h_old->tab[i]; htab_val_t *new_pos; if (s->value) { new_pos = hashtab_lookup(h_new, s->key, true, NULL); if (!new_pos) goto err; *new_pos = s->value; } } } return h_new; err: hashtab_destroy(h_new); return NULL; } /* example, and avoid "unused" warnings */ static inline void _hashtab_example(void) { unsigned nitem, nlink; struct HashTab *h, *h2; h = hashtab_create(1024, NULL, USUAL_ALLOC); hashtab_lookup(h, 123, true, NULL); hashtab_stats(h, &nitem, &nlink); h2 = hashtab_copy(h, 2048); hashtab_delete(h, 123, NULL); hashtab_destroy(h); hashtab_destroy(h2); } skytools-3.2.6/lib/usual/mempool.c0000644000000000000000000000346312166266754014037 0ustar /* * Simple memory pool for variable-length allocations. * * Copyright (c) 2009 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. 
* * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include /* * Allows allocation of several variable-sized objects, * freeing them all together. * * ToDo: make it more 'obstack'-like (???) * - free_last * - resize_last * - append */ struct MemPool { struct MemPool *prev; unsigned size; unsigned used; }; void *mempool_alloc(struct MemPool **pool, unsigned size) { struct MemPool *cur = *pool; void *ptr; unsigned nsize; size = ALIGN(size); if (cur && cur->used + size <= cur->size) { ptr = (char *)(cur + 1) + cur->used; cur->used += size; return ptr; } else { nsize = cur ? (2 * cur->size) : 512; while (nsize < size) nsize *= 2; cur = calloc(1, sizeof(*cur) + nsize); if (cur == NULL) return NULL; cur->used = size; cur->size = nsize; cur->prev = *pool; *pool = cur; return (char *)(cur + 1); } } void mempool_destroy(struct MemPool **pool) { struct MemPool *cur, *tmp; if (!pool) return; for (cur = *pool, *pool = NULL; cur; ) { tmp = cur->prev; free(cur); cur = tmp; } } skytools-3.2.6/lib/usual/socket_win32.h0000644000000000000000000001336512166266754014710 0ustar /* * Socket compat code for win32. * * Copyright (c) 2007-2009 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. 
* * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef _USUAL_SOCKET_WIN32_H_ #define _USUAL_SOCKET_WIN32_H_ typedef int socklen_t; #define in_addr_t uint32_t /* * make recvmsg/sendmsg and fd related code compile */ struct iovec { void *iov_base; /* Base address. */ size_t iov_len; /* Length. */ }; struct msghdr { void *msg_name; int msg_namelen; struct iovec *msg_iov; int msg_iovlen; void *msg_control; int msg_controllen; int msg_flags; }; #ifndef SCM_RIGHTS #define SCM_RIGHTS 1 #endif #ifndef CMSG_FIRSTHDR struct cmsghdr { int cmsg_len; int cmsg_level; int cmsg_type; }; #define CMSG_DATA(cmsg) ((unsigned char *) ((struct cmsghdr *) (cmsg) + 1)) #define CMSG_ALIGN(len) (((len) + sizeof (size_t) - 1) \ & ~(sizeof (size_t) - 1)) #define CMSG_LEN(len) ((int)(CMSG_ALIGN(sizeof(struct cmsghdr))+(len))) #define CMSG_FIRSTHDR(mhdr) \ ((mhdr)->msg_controllen >= (int)sizeof(struct cmsghdr) ? \ (struct cmsghdr *)(mhdr)->msg_control : \ (struct cmsghdr *)NULL) #define CMSG_NXTHDR(mhdr, cmsg) \ (((cmsg) == NULL) ? CMSG_FIRSTHDR(mhdr) : \ (((u_char *)(cmsg) + CMSG_ALIGN((cmsg)->cmsg_len) \ + CMSG_ALIGN(sizeof(struct cmsghdr)) > \ (u_char *)((mhdr)->msg_control) + (mhdr)->msg_controllen) ? \ (struct cmsghdr *)NULL : \ (struct cmsghdr *)((u_char *)(cmsg) + CMSG_ALIGN((cmsg)->cmsg_len)))) #define CMSG_SPACE(len) (CMSG_ALIGN(sizeof(struct cmsghdr))+CMSG_ALIGN(len)) #endif /* * unify WSAGetLastError() with errno. * * and convert int <-> SOCKET. 
*/ /* int <-> socket */ #define FD2S(fd) ((intptr_t)(fd)) #define S2FD(fd) ((int)(fd)) /* socket <-> HANDLE, plain casts */ #define FD2H(fd) ((HANDLE)FD2S(fd)) #define H2FD(h) S2FD((SOCKET)(h)) static inline int ewrap(int res) { if (res < 0) errno = WSAGetLastError(); return res; } /* proper signature for setsockopt */ static inline int w_setsockopt(int fd, int level, int optname, const void *optval, int optlen) { return ewrap(setsockopt(FD2S(fd), level, optname, optval, optlen)); } #define setsockopt(a,b,c,d,e) w_setsockopt(a,b,c,d,e) /* proper signature for send */ static inline ssize_t w_send(int fd, const void *buf, size_t len, int flags) { return ewrap(send(FD2S(fd), buf, len, flags)); } #define send(a,b,c,d) w_send(a,b,c,d) /* proper signature for recv */ static inline ssize_t w_recv(int fd, void *buf, size_t len, int flags) { return ewrap(recv(FD2S(fd), buf, len, flags)); } #define recv(a,b,c,d) w_recv(a,b,c,d) #define getsockopt(a,b,c,d,e) ewrap(getsockopt(FD2S(a),b,c,d,e)) #define connect(a,b,c) ewrap(connect(FD2S(a),b,c)) #define socket(a,b,c) ewrap(S2FD(socket(a,b,c))) #define bind(a,b,c) ewrap(bind(FD2S(a),b,c)) #define listen(a,b) ewrap(listen(FD2S(a),b)) #define accept(a,b,c) ewrap(accept(FD2S(a),b,c)) #define getpeername(a,b,c) ewrap(getpeername(FD2S(a),b,c)) #define getsockname(a,b,c) ewrap(getsockname(FD2S(a),b,c)) #define select(a,b,c,d,e) ewrap(select(a,b,c,d,e)) static inline struct hostent *w_gethostbyname(const char *n) { struct hostent *res = gethostbyname(n); if (!res) errno = WSAGetLastError(); return res; } #define gethostbyname(a) w_gethostbyname(a) /* make unix socket related code compile */ struct sockaddr_un { short sun_family; char sun_path[128]; }; /* sendmsg is not used */ static inline int sendmsg(int s, const struct msghdr *m, int flags) { if (m->msg_iovlen != 1) { errno = EINVAL; return -1; } return send(s, m->msg_iov[0].iov_base, m->msg_iov[0].iov_len, flags); } /* recvmsg() is, but only with one iov */ static inline int 
recvmsg(int s, struct msghdr *m, int flags) { if (m->msg_iovlen != 1) { errno = EINVAL; return -1; } if (m->msg_controllen) m->msg_controllen = 0; return recv(s, m->msg_iov[0].iov_base, m->msg_iov[0].iov_len, flags); } /* * fcntl */ #define F_GETFD 1 #define F_SETFD 2 #define F_GETFL 3 #define F_SETFL 4 #define O_NONBLOCK 1 #define FD_CLOEXEC HANDLE_FLAG_INHERIT static inline int fcntl(int fd, int cmd, long arg) { ULONG lval; DWORD dval; switch (cmd) { case F_GETFD: if (GetHandleInformation(FD2H(fd), &dval)) return dval; errno = EINVAL; return -1; case F_SETFD: /* set FD_CLOEXEC */ if (SetHandleInformation(FD2H(fd), FD_CLOEXEC, arg)) return 0; errno = EINVAL; return -1; case F_GETFL: /* O_NONBLOCK? */ return 0; case F_SETFL: /* set O_NONBLOCK */ lval = (arg & O_NONBLOCK) ? 1 : 0; if (ioctlsocket(FD2S(fd), FIONBIO, &lval) == SOCKET_ERROR) { errno = WSAGetLastError(); return -1; } return 0; default: errno = EINVAL; return -1; } } /* * SIO_KEEPALIVE_VALS for mingw32 */ #if !defined(SIO_KEEPALIVE_VALS) #define SIO_KEEPALIVE_VALS _WSAIOW(IOC_VENDOR,4) struct tcp_keepalive { u_long onoff; u_long keepalivetime; u_long keepaliveinterval; }; #endif /* * Use native poll() if available */ #if !defined(HAVE_POLL) && defined(POLLIN) #define HAVE_POLL #define poll(a,b,c) usual_poll(a,b,c) static inline int poll(struct pollfd *fds, int nfds, int timeout) { return WSAPoll(fds, nfds, timeout); } #endif #endif skytools-3.2.6/lib/COPYRIGHT0000644000000000000000000000151212166266754012356 0ustar /* * libusual - Utility library for C * * Copyright (c) 2007-2009 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ skytools-3.2.6/lib/m4/0000755000000000000000000000000012166266754011404 5ustar skytools-3.2.6/lib/m4/usual.m40000644000000000000000000002562212166266754013006 0ustar dnl Those depend on correct order: dnl AC_USUAL_INIT dnl AC_USUAL_PROGRAM_CHECK dnl AC_USUAL_HEADER_CHECK dnl AC_USUAL_TYPE_CHECK dnl AC_USUAL_FUNCTION_CHECK dnl Order does not matter: dnl AC_USUAL_CASSERT dnl AC_USUAL_WERROR dnl AC_USUAL_DEBUG dnl dnl AC_USUAL_INIT: dnl - Sets PORTNAME=win32/unix dnl - If building from separate dir, writes top-level Makefile (antimake) dnl dnl Also defines port-specific flags: dnl _GNU_SOURCE, _WIN32_WINNT, WIN32_LEAN_AND_MEAN dnl AC_DEFUN([AC_USUAL_INIT], [ # if building separately from srcdir, write top-level makefile if test "$srcdir" != "."; then echo "include $srcdir/Makefile" > Makefile fi AC_MSG_CHECKING([target host type]) xhost="$host_alias" if test "x$xhost" = "x"; then xhost=`uname -s` fi case "$xhost" in *cygwin* | *mingw* | *pw32* | *MINGW*) LIBS="$LIBS -lws2_32" PORTNAME=win32;; *) PORTNAME=unix ;; esac AC_SUBST(PORTNAME) AC_MSG_RESULT([$PORTNAME]) dnl Set the flags before any feature tests. 
if test "$PORTNAME" = "win32"; then AC_DEFINE([WIN32_LEAN_AND_MEAN], [1], [Define to request cleaner win32 headers.]) AC_DEFINE([WINVER], [0x0501], [Define to max win32 API version (0x0501=XP).]) else AC_DEFINE([_GNU_SOURCE], [1], [Define to get working glibc.]) fi dnl Package-specific data AC_SUBST([pkgdatadir], ['${datarootdir}'/${PACKAGE_TARNAME}]) dnl pkgconfig files AC_SUBST([pkgconfigdir], ['${libdir}/pkgconfig']) ]) dnl Old name for initial checks AC_DEFUN([AC_USUAL_PORT_CHECK], [AC_USUAL_INIT]) dnl dnl AC_USUAL_PROGRAM_CHECK: Simple C environment: CC, CPP, INSTALL dnl AC_DEFUN([AC_USUAL_PROGRAM_CHECK], [ AC_PROG_CC_STDC AC_PROG_CPP dnl Check if compiler supports __func__ AC_CACHE_CHECK([whether compiler supports __func__], pgac_cv_funcname_func, [AC_TRY_COMPILE([#include ], [printf("%s\n", __func__);], [pgac_cv_funcname_func=yes], [pgac_cv_funcname_func=no])]) if test x"$pgac_cv_funcname_func" = xyes ; then AC_DEFINE(HAVE_FUNCNAME__FUNC, 1, [Define to 1 if your compiler understands __func__.]) fi dnl Check if linker supports -Wl,--as-needed if test "$GCC" = "yes"; then old_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS -Wl,--as-needed" AC_MSG_CHECKING([whether linker supports --as-needed]) AC_LINK_IFELSE([AC_LANG_SOURCE([int main(void) { return 0; }])], [AC_MSG_RESULT([yes])], [AC_MSG_RESULT([no]) LDFLAGS="$old_LDFLAGS"]) fi dnl Check if compiler supports gcc-style dependencies AC_MSG_CHECKING([whether compiler supports dependency generation]) old_CFLAGS="$CFLAGS" CFLAGS="$CFLAGS -MD -MP -MT conftest.o -MF conftest.o.d" AC_COMPILE_IFELSE([AC_LANG_SOURCE([void foo(void){}])], [HAVE_CC_DEPFLAG=yes], [HAVE_CC_DEPFLAG=no]) rm -f conftest.d CFLAGS="$old_CFLAGS" AC_MSG_RESULT([$HAVE_CC_DEPFLAG]) AC_SUBST(HAVE_CC_DEPFLAG) dnl Pick good warning flags for gcc WFLAGS="" if test x"$GCC" = xyes; then AC_MSG_CHECKING([for working warning switches]) good_CFLAGS="$CFLAGS" flags="-Wall -Wextra" # turn off noise from Wextra flags="$flags -Wno-unused-parameter 
-Wno-missing-field-initializers" # Wextra does not turn those on? flags="$flags -Wmissing-prototypes -Wpointer-arith -Wendif-labels" flags="$flags -Wdeclaration-after-statement -Wold-style-definition" flags="$flags -Wstrict-prototypes -Wundef -Wformat=2" flags="$flags -Wuninitialized" for f in $flags; do CFLAGS="$good_CFLAGS $WFLAGS $f" AC_COMPILE_IFELSE([AC_LANG_SOURCE([void foo(void){}])], [WFLAGS="$WFLAGS $f"]) done # avoid -Wextra if missing-field.initializers does not work echo "$WFLAGS" | grep missing-field-initializers > /dev/null \ || WFLAGS=`echo "$WFLAGS"|sed 's/ -Wextra//'` CFLAGS="$good_CFLAGS" AC_MSG_RESULT([done]) fi AC_SUBST(WFLAGS) AC_PROG_INSTALL AC_PROG_LN_S AC_PROG_EGREP AC_PROG_AWK dnl AC_PROG_MKDIR_P and AC_PROG_SED are from newer autotools m4_ifdef([AC_PROG_MKDIR_P], [ AC_PROG_MKDIR_P ], [ MKDIR_P="mkdir -p" AC_SUBST(MKDIR_P) ]) m4_ifdef([AC_PROG_SED], [ AC_PROG_SED ], [ SED="sed" AC_SUBST(SED) ]) AC_CHECK_TOOL([STRIP], [strip]) AC_CHECK_TOOL([RANLIB], [ranlib], [true]) AC_CHECK_TOOL([AR], [ar]) ARFLAGS=rcu AC_SUBST(ARFLAGS) ]) dnl dnl AC_USUAL_TYPE_CHECK: Basic types for C dnl AC_DEFUN([AC_USUAL_TYPE_CHECK], [ AC_C_INLINE AC_C_RESTRICT AC_C_BIGENDIAN AC_SYS_LARGEFILE AC_TYPE_PID_T AC_TYPE_UID_T AC_TYPE_SIZE_T ]) dnl dnl AC_USUAL_HEADER_CHECK: Basic headers dnl AC_DEFUN([AC_USUAL_HEADER_CHECK], [ AC_CHECK_HEADERS([inttypes.h stdbool.h unistd.h sys/time.h]) AC_CHECK_HEADERS([sys/socket.h poll.h sys/poll.h sys/un.h]) AC_CHECK_HEADERS([arpa/inet.h netinet/in.h netinet/tcp.h]) AC_CHECK_HEADERS([sys/param.h sys/uio.h pwd.h grp.h]) AC_CHECK_HEADERS([sys/wait.h sys/mman.h syslog.h netdb.h dlfcn.h]) AC_CHECK_HEADERS([err.h pthread.h endian.h sys/endian.h byteswap.h]) AC_CHECK_HEADERS([malloc.h regex.h getopt.h fnmatch.h]) dnl ucred.h may have prereqs AC_CHECK_HEADERS([ucred.h sys/ucred.h], [], [], [ #ifdef HAVE_SYS_TYPES_H #include #endif #ifdef HAVE_SYS_PARAM_H #include #endif ]) ]) dnl dnl AC_USUAL_FUNCTION_CHECK: Basic functions dnl 
AC_DEFUN([AC_USUAL_FUNCTION_CHECK], [ ### Functions provided if missing dnl AC_CHECK_FUNCS(basename dirname) # unstable, provide always AC_CHECK_FUNCS(strlcpy strlcat getpeereid sigaction sigqueue) AC_CHECK_FUNCS(inet_ntop inet_pton poll getline memrchr regcomp) AC_CHECK_FUNCS(err errx warn warnx getprogname setprogname) AC_CHECK_FUNCS(posix_memalign memalign valloc) AC_CHECK_FUNCS(getopt getopt_long getopt_long_only) AC_CHECK_FUNCS(fls flsl flsll ffs ffsl ffsll) AC_CHECK_FUNCS(fnmatch mbsnrtowcs) ### Functions provided only on win32 AC_CHECK_FUNCS(localtime_r gettimeofday recvmsg sendmsg usleep getrusage) ### Functions used by libusual itself AC_CHECK_FUNCS(syslog mmap getpeerucred) ### win32: link with ws2_32 AC_SEARCH_LIBS(WSAGetLastError, ws2_32) AC_FUNC_STRERROR_R ### AC_MSG_CHECKING([for integer enc/dec functions]) AC_LINK_IFELSE([AC_LANG_SOURCE([ #include #ifdef HAVE_SYS_ENDIAN_H #include #endif #ifdef HAVE_ENDIAN_H #include #endif char p[[]] = "01234567"; int main(void) { be16enc(p, 0); be32enc(p, 1); be64enc(p, 2); le16enc(p, 2); le32enc(p, 3); le64enc(p, 4); return (int)(be16dec(p) + be32dec(p) + be64dec(p)) + (int)(le16dec(p) + le32dec(p) + le64dec(p)); } ])], [ AC_MSG_RESULT([found]) AC_DEFINE([HAVE_ENCDEC_FUNCS], [1], [Define if *enc & *dec functions are available]) ], [AC_MSG_RESULT([not found])]) ]) dnl dnl AC_USUAL_CASSERT: --enable-cassert switch to set macro CASSERT dnl AC_DEFUN([AC_USUAL_CASSERT], [ AC_ARG_ENABLE(cassert, AC_HELP_STRING([--enable-cassert],[turn on assert checking in code])) AC_MSG_CHECKING([whether to enable asserts]) if test "$enable_cassert" = "yes"; then AC_DEFINE(CASSERT, 1, [Define to enable assert checking]) AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no]) fi ]) dnl dnl AC_USUAL_WERROR: --enable-werror switch to turn warnings into errors dnl AC_DEFUN([AC_USUAL_WERROR], [ AC_ARG_ENABLE(werror, AC_HELP_STRING([--enable-werror],[add -Werror to CFLAGS])) AC_MSG_CHECKING([whether to fail on warnings]) if test "$enable_werror" = 
"yes"; then CFLAGS="$CFLAGS -Werror" AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no]) fi ]) dnl dnl AC_USUAL_DEBUG: --disable-debug switch to strip binary dnl AC_DEFUN([AC_USUAL_DEBUG], [ AC_ARG_ENABLE(debug, AC_HELP_STRING([--disable-debug],[strip binary]), [], [enable_debug=yes]) AC_MSG_CHECKING([whether to build debug binary]) if test "$enable_debug" = "yes"; then LDFLAGS="-g $LDFLAGS" BININSTALL="$INSTALL" AC_MSG_RESULT([yes]) else BININSTALL="$INSTALL -s" AC_MSG_RESULT([no]) fi AC_SUBST(enable_debug) ]) dnl dnl AC_USUAL_LIBEVENT: --with-libevent dnl dnl AC_USUAL_LIBEVENT - prefer-yes: dnl default - search for libevent, error if not found dnl --with - search for libevent, error if not found dnl --without - use libusual dnl dnl AC_USUAL_LIBEVENT_OPT - prefer-no: dnl default - use libusual dnl --with - search for libevent, error if not found dnl --without - use libusual dnl AC_DEFUN([AC_USUAL_LIBEVENT_OPT], [AC_USUAL_LIBEVENT(1)]) AC_DEFUN([AC_USUAL_LIBEVENT], [ ifelse([$#], [0], [levent=yes], [levent=no]) AC_MSG_CHECKING([for libevent]) AC_ARG_WITH(libevent, AC_HELP_STRING([--with-libevent=prefix],[Specify where libevent is installed]), [ if test "$withval" = "no"; then levent=no elif test "$withval" = "yes"; then levent=yes else levent=yes CPPFLAGS="$CPPFLAGS -I$withval/include" LDFLAGS="$LDFLAGS -L$withval/lib" fi ], []) if test "$levent" = "no"; then AC_MSG_RESULT([using usual/event]) AC_DEFINE(HAVE_EVENT_LOOPBREAK, 1, [usual/event.h has it.]) AC_DEFINE(HAVE_EVENT_BASE_NEW, 1, [usual/event.h has it.]) have_libevent=no else # libevent AC_DEFINE(HAVE_LIBEVENT, 1, [Use real libevent.]) LIBS="-levent $LIBS" AC_LINK_IFELSE([AC_LANG_SOURCE([ #include #include #include #include int main(void) { struct event ev; event_init(); event_set(&ev, 1, EV_READ, NULL, NULL); /* this checks for 1.2+ but next we check for 1.3b+ anyway */ /* event_base_free(NULL); */ } ])], [AC_MSG_RESULT([found])], [AC_MSG_ERROR([not found, cannot proceed])]) dnl libevent < 1.3b crashes on 
event_base_free() dnl no good way to check libevent version. use hack: dnl evhttp.h defines HTTP_SERVUNAVAIL only since 1.3b AC_MSG_CHECKING([whether libevent version >= 1.3b]) AC_EGREP_CPP([HTTP_SERVUNAVAIL], [#include HTTP_SERVUNAVAIL ], [AC_MSG_ERROR([no, cannot proceed])], [AC_MSG_RESULT([yes])]) AC_CHECK_FUNCS(event_loopbreak event_base_new evdns_base_new) have_libevent=yes fi # libevent AC_SUBST(have_libevent) ]) dnl AC_USUAL_LIBEVENT dnl dnl AC_USUAL_UREGEX: --with-uregex dnl dnl Allow override of system regex dnl AC_DEFUN([AC_USUAL_UREGEX], [ AC_MSG_CHECKING([whether to force internal regex]) uregex=no AC_ARG_WITH(uregex, AC_HELP_STRING([--with-uregex],[Force use of internal regex]), [ if test "$withval" = "yes"; then uregex=yes fi ], []) if test "$uregex" = "yes"; then AC_MSG_RESULT([yes]) AC_DEFINE(USE_INTERNAL_REGEX, 1, [Define to force use of uRegex.]) else AC_MSG_RESULT([no]) fi ]) dnl AC_USUAL_UREGEX dnl dnl AC_USUAL_GETADDRINFO_A - getaddrinfo_a() is required dnl AC_DEFUN([AC_USUAL_GETADDRINFO_A], [ AC_SEARCH_LIBS(getaddrinfo_a, anl) AC_CACHE_CHECK([whether to use native getaddinfo_a], ac_cv_usual_glibc_gaia, [AC_TRY_LINK([ #include #ifdef HAVE_NETDB_H #include #endif ], [ #if __GLIBC_PREREQ(2,9) getaddrinfo_a(0,NULL,0,NULL); #else none or broken #endif ], [ac_cv_usual_glibc_gaia=yes], [ac_cv_usual_glibc_gaia=no])]) if test x"$ac_cv_usual_glibc_gaia" = xyes ; then AC_DEFINE(HAVE_GETADDRINFO_A, 1, [Define to 1 if you have the getaddrinfo_a() function.]) else ACX_PTHREAD(, [AC_MSG_RESULT([Threads not available and fallback getaddrinfo_a() non-functional.])]) CC="$PTHREAD_CC" CFLAGS="$CFLAGS $PTHREAD_CFLAGS" LIBS="$LIBS $PTHREAD_LIBS" fi ]) skytools-3.2.6/lib/m4/acx_pthread.m40000644000000000000000000001613112166266754014132 0ustar AC_DEFUN([ACX_PTHREAD], [ AC_REQUIRE([AC_CANONICAL_HOST]) AC_LANG_SAVE AC_LANG_C acx_pthread_ok=no # We used to check for pthread.h first, but this fails if pthread.h # requires special compiler flags (e.g. 
on True64 or Sequent). # It gets checked for in the link test anyway. # First of all, check if the user has set any of the PTHREAD_LIBS, # etcetera environment variables, and if threads linking works using # them: if test x"$PTHREAD_LIBS$PTHREAD_CFLAGS" != x; then save_CFLAGS="$CFLAGS" CFLAGS="$CFLAGS $PTHREAD_CFLAGS" save_LIBS="$LIBS" LIBS="$PTHREAD_LIBS $LIBS" AC_MSG_CHECKING([for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS]) AC_TRY_LINK_FUNC(pthread_join, acx_pthread_ok=yes) AC_MSG_RESULT($acx_pthread_ok) if test x"$acx_pthread_ok" = xno; then PTHREAD_LIBS="" PTHREAD_CFLAGS="" fi LIBS="$save_LIBS" CFLAGS="$save_CFLAGS" fi # We must check for the threads library under a number of different # names; the ordering is very important because some systems # (e.g. DEC) have both -lpthread and -lpthreads, where one of the # libraries is broken (non-POSIX). # Create a list of thread flags to try. Items starting with a "-" are # C compiler flags, and other items are library names, except for "none" # which indicates that we try without any flags at all, and "pthread-config" # which is a program returning the flags for the Pth emulation library. acx_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mthreads pthread --thread-safe -mt pthread-config" # The ordering *is* (sometimes) important. 
Some notes on the # individual items follow: # pthreads: AIX (must check this before -lpthread) # none: in case threads are in libc; should be tried before -Kthread and # other compiler flags to prevent continual compiler warnings # -Kthread: Sequent (threads in libc, but -Kthread needed for pthread.h) # -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able) # lthread: LinuxThreads port on FreeBSD (also preferred to -pthread) # -pthread: Linux/gcc (kernel threads), BSD/gcc (userland threads) # -pthreads: Solaris/gcc # -mthreads: Mingw32/gcc, Lynx/gcc # -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it # doesn't hurt to check since this sometimes defines pthreads too; # also defines -D_REENTRANT) # ... -mt is also the pthreads flag for HP/aCC # pthread: Linux, etcetera # --thread-safe: KAI C++ # pthread-config: use pthread-config program (for GNU Pth library) case "${host_cpu}-${host_os}" in *solaris*) # On Solaris (at least, for some versions), libc contains stubbed # (non-functional) versions of the pthreads routines, so link-based # tests will erroneously succeed. (We need to link with -pthreads/-mt/ # -lpthread.) (The stubs are missing pthread_cleanup_push, or rather # a function called by this macro, so we could check for that, but # who knows whether they'll stub that too in a future libc.) 
So, # we'll just look for -pthreads and -lpthread first: acx_pthread_flags="-pthreads pthread -mt -pthread $acx_pthread_flags" ;; esac if test x"$acx_pthread_ok" = xno; then for flag in $acx_pthread_flags; do case $flag in none) AC_MSG_CHECKING([whether pthreads work without any flags]) ;; -*) AC_MSG_CHECKING([whether pthreads work with $flag]) PTHREAD_CFLAGS="$flag" ;; pthread-config) AC_CHECK_PROG(acx_pthread_config, pthread-config, yes, no) if test x"$acx_pthread_config" = xno; then continue; fi PTHREAD_CFLAGS="`pthread-config --cflags`" PTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`" ;; *) AC_MSG_CHECKING([for the pthreads library -l$flag]) PTHREAD_LIBS="-l$flag" ;; esac save_LIBS="$LIBS" save_CFLAGS="$CFLAGS" LIBS="$PTHREAD_LIBS $LIBS" CFLAGS="$CFLAGS $PTHREAD_CFLAGS" # Check for various functions. We must include pthread.h, # since some functions may be macros. (On the Sequent, we # need a special flag -Kthread to make this header compile.) # We check for pthread_join because it is in -lpthread on IRIX # while pthread_create is in libc. We check for pthread_attr_init # due to DEC craziness with -lpthreads. We check for # pthread_cleanup_push because it is one of the few pthread # functions on Solaris that doesn't have a non-functional libc stub. # We try pthread_create on general principles. AC_TRY_LINK([#include <pthread.h>], [pthread_t th; pthread_join(th, 0); pthread_attr_init(0); pthread_cleanup_push(0, 0); pthread_create(0,0,0,0); pthread_cleanup_pop(0); ], [acx_pthread_ok=yes]) LIBS="$save_LIBS" CFLAGS="$save_CFLAGS" AC_MSG_RESULT($acx_pthread_ok) if test "x$acx_pthread_ok" = xyes; then break; fi PTHREAD_LIBS="" PTHREAD_CFLAGS="" done fi # Various other checks: if test "x$acx_pthread_ok" = xyes; then save_LIBS="$LIBS" LIBS="$PTHREAD_LIBS $LIBS" save_CFLAGS="$CFLAGS" CFLAGS="$CFLAGS $PTHREAD_CFLAGS" # Detect AIX lossage: JOINABLE attribute is called UNDETACHED. 
AC_MSG_CHECKING([for joinable pthread attribute]) attr_name=unknown for attr in PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED; do AC_TRY_LINK([#include <pthread.h>], [int attr=$attr; return attr;], [attr_name=$attr; break]) done AC_MSG_RESULT($attr_name) if test "$attr_name" != PTHREAD_CREATE_JOINABLE; then AC_DEFINE_UNQUOTED(PTHREAD_CREATE_JOINABLE, $attr_name, [Define to necessary symbol if this constant uses a non-standard name on your system.]) fi AC_MSG_CHECKING([if more special flags are required for pthreads]) flag=no case "${host_cpu}-${host_os}" in *-aix* | *-freebsd* | *-darwin*) flag="-D_THREAD_SAFE";; *solaris* | *-osf* | *-hpux*) flag="-D_REENTRANT";; esac AC_MSG_RESULT(${flag}) if test "x$flag" != xno; then PTHREAD_CFLAGS="$flag $PTHREAD_CFLAGS" fi LIBS="$save_LIBS" CFLAGS="$save_CFLAGS" # More AIX lossage: must compile with xlc_r or cc_r if test x"$GCC" != xyes; then AC_CHECK_PROGS(PTHREAD_CC, xlc_r cc_r, ${CC}) else PTHREAD_CC=$CC fi else PTHREAD_CC="$CC" fi AC_SUBST(PTHREAD_LIBS) AC_SUBST(PTHREAD_CFLAGS) AC_SUBST(PTHREAD_CC) # Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND: if test x"$acx_pthread_ok" = xyes; then ifelse([$1],,AC_DEFINE(HAVE_PTHREAD,1,[Define if you have POSIX threads libraries and header files.]),[$1]) : else acx_pthread_ok=no $2 fi AC_LANG_RESTORE ])dnl ACX_PTHREAD skytools-3.2.6/lib/m4/antimake.m40000644000000000000000000000033612166266754013441 0ustar dnl dnl AMK_INIT: Generate initial makefile dnl AC_DEFUN([AMK_INIT], [ # if building separately from srcdir, write top-level makefile if test "$srcdir" != "."; then echo "include $srcdir/Makefile" > Makefile fi ]) skytools-3.2.6/lib/doc/0000755000000000000000000000000012166266754011631 5ustar skytools-3.2.6/lib/doc/mainpage.dox0000644000000000000000000001212712166266754014131 0ustar /** * @mainpage * * @section libusual libusual * * libusual is utility library. 
* * Unlike APR or GLIB which create their own API world, libusual * tries to use standardized API's whenever possible. * * Goals for portability APIs: * - Follow modern POSIX, BSD, glibc. Make the APIs available everywhere. * - If compat is impossible, allow the user code to compile - eg. UNIX * sockets on win32. * - Assume cooparating user: * - libusual needs to implement only API that are used and useful. No need try to provide full POSIX. * - user code survives gracefully when libusual provides less functionality. * * Goals for new APIs: * - Simple, clear API * - Simple, clear implementation. * - It is preferable to have simple code which can be copied and modified * for some special case than complex code that tries to handle * everything at once. * * @section antimake Antimake build system. * * Build system demos and docs. * * @section modules Module list. * *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
Compat includes
Base C environment
ctype compat
Command line argument processing
Error handling for command-line tools
Async DNS lookup
Pthreads compat
Signal compat
Socket compat and helper functions
String compat and helper functions
Time compat and helper functions
fnmatch compat
Data Structures
Binary Tree
Crit-Bit Tree
Hash table
Binary heap
Double-linked list
Memory buffer
Minimal dict
Double-linked list for shared mem
List with stats
Refcounted strings
Data Processing
Bit arithmetic
Config parser
CRC32
Endianess conversion
Jenkins' lookup3 hash
Misc arithmetic
POSIX regex compat
PostgreSQL data formats
Low-level UTF8 handling
Cryptography
Common API for cryptographic message digests
HMAC with digest
MD5 hash
SHA1 hash
SHA256/224 hashes
SHA512/384 hashes
Keccak/SHA3 hashes
Memory Allocation
Context Allocator framework
Extra allocators
Simple append-only memory pool
Slab allocator for same-size objects
OS support
libevent compat
Process daemonization
Various file I/O tools
Logging framework for daemons
Async Postgres connection framework
Safety wrappers around OS I/O
*/ skytools-3.2.6/lib/doc/Doxyfile0000644000000000000000000017761412166266754013357 0ustar # Doxyfile 1.6.3 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project # # All text after a hash (#) is considered a comment and will be ignored # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" ") #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the config file # that follow. The default is UTF-8 which is also the encoding used for all # text before the first occurrence of this tag. Doxygen uses libiconv (or the # iconv built into libc) for the transcoding. See # http://www.gnu.org/software/libiconv for the list of possible encodings. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or a sequence of words surrounded # by quotes) that should identify the project. PROJECT_NAME = libusual # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. PROJECT_NUMBER = 0.1 # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. # If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. OUTPUT_DIRECTORY = doc # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. 
# Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system. CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. # The default language is English, other supported languages are: # Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, # Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, # Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English # messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, # Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrilic, Slovak, # Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. OUTPUT_LANGUAGE = English # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. 
# If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description. ALWAYS_DETAILED_SEC = YES # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = YES # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the # path to strip. STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. 
This can be useful is your file systems # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like regular Qt-style comments # (thus requiring an explicit @brief command for a brief description.) JAVADOC_AUTOBRIEF = YES # If the QT_AUTOBRIEF tag is set to YES then Doxygen will # interpret the first line (until the first dot) of a Qt-style # comment as the brief description. If set to NO, the comments # will behave just like regular Qt-style comments (thus requiring # an explicit \brief command for a brief description.) QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 8 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". 
# For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = YES # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java # sources only. Doxygen will then generate output that is more tailored for # Java. For instance, namespaces will be presented as packages, qualified # scopes will look different, etc. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources only. Doxygen will then generate output that is more tailored for # Fortran. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for # VHDL. OPTIMIZE_OUTPUT_VHDL = NO # Doxygen selects the parser to use depending on the extension of the files it parses. # With this tag you can assign which parser to use for a given extension. # Doxygen has a built-in mapping, but you can override or extend it using this tag. # The format is ext=language, where ext is a file extension, and language is one of # the parsers supported by doxygen: IDL, Java, Javascript, C#, C, C++, D, PHP, # Objective-C, Python, Fortran, VHDL, C, C++. For instance to make doxygen treat # .inc files as Fortran files (default is PHP), and .f files as C (default is Fortran), # use: inc=Fortran f=C. Note that for custom extensions you also need to set FILE_PATTERNS otherwise the files are not read by doxygen. 
EXTENSION_MAPPING = # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want # to include (a tag file for) the STL sources as input, then you should # set this tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); v.s. # func(std::string) {}). This also make the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. BUILTIN_STL_SUPPORT = NO # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. # Doxygen will parse them like normal C++ but will assume all classes use public # instead of private inheritance when no explicit protection keyword is present. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate getter # and setter methods for a property. Setting this option to YES (the default) # will make doxygen to replace the get and set methods by a property in the # documentation. This will only work if the methods are indeed getting or # setting a simple type. If this is not the case, or you want to show the # methods anyway, you should set this option to NO. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). Set it to # NO to prevent subgrouping. Alternatively, this can be done per class using # the \nosubgrouping command. 
SUBGROUPING = YES # When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum # is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically # be useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. TYPEDEF_HIDES_STRUCT = YES # The SYMBOL_CACHE_SIZE determines the size of the internal cache use to # determine which symbols to keep in memory and which to flush to disk. # When the cache is full, less often used symbols will be written to disk. # For small to medium size projects (<1000 input files) the default value is # probably good enough. For larger projects a too small cache size can cause # doxygen to be busy swapping symbols to and from disk most of the time # causing a significant performance penality. # If the system has enough physical memory increasing the cache will improve the # performance by keeping more symbols in memory. Note that the value works on # a logarithmic scale so increasing the size by one will rougly double the # memory usage. The cache size is given by this formula: # 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, # corresponding to a cache size of 2^16 = 65536 symbols SYMBOL_CACHE_SIZE = 0 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. 
# Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = NO # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. EXTRACT_STATIC = YES # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = NO # This flag is only useful for Objective-C code. When set to YES local # methods, which are defined in the implementation section but not in # the interface are included in the documentation. # If set to NO (the default) only methods in the interface are included. EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base # name of the file that contains the anonymous namespace. By default # anonymous namespace are hidden. EXTRACT_ANON_NSPACES = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. # If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = YES # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. This option has no effect if EXTRACT_ALL is enabled. 
HIDE_UNDOC_CLASSES = YES # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. # If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. # If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. CASE_SENSE_NAMES = NO # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = NO # If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen # will list include files with double quotes in the documentation # rather than with sharp brackets. FORCE_LOCAL_INCLUDES = NO # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. 
INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. If set to NO the members will appear in # declaration order. SORT_MEMBER_DOCS = NO # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the # brief documentation of file, namespace and class members alphabetically # by member name. If set to NO (the default) the members will appear in # declaration order. SORT_BRIEF_DOCS = NO # If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the (brief and detailed) documentation of class members so that constructors and destructors are listed first. If set to NO (the default) the constructors will appear in the respective orders defined by SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. SORT_MEMBERS_CTORS_1ST = NO # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the # hierarchy of group names into alphabetical order. If set to NO (the default) # the group names will appear in their defined order. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be # sorted by fully-qualified names, including namespaces. If set to # NO (the default), the class list will be sorted only by class name, # not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the # alphabetical list. SORT_BY_SCOPE_NAME = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. 
This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if sectionname ... \endif. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or define consists of for it to appear in # the documentation. If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. # The appearance of the initializer of individual variables and defines in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. MAX_INITIALIZER_LINES = 0 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = YES # If the sources in your project are distributed over multiple directories # then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy # in the documentation. The default is NO. SHOW_DIRECTORIES = NO # Set the SHOW_FILES tag to NO to disable the generation of the Files page. # This will remove the Files entry from the Quick Index and from the # Folder Tree View (if specified). The default is YES. SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the # Namespaces page. 
# This will remove the Namespaces entry from the Quick Index # and from the Folder Tree View (if specified). The default is YES. SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via # popen()) the command <command> <input-file>, where <command> is the value of # the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file # provided by doxygen. Whatever the program writes to standard output # is used as the file version. See the manual for examples. FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed by # doxygen. The layout file controls the global structure of the generated output files # in an output format independent way. To create the layout file that represents # doxygen's defaults, run doxygen with the -l option. You can optionally specify a # file name after the option, if omitted DoxygenLayout.xml will be used as the name # of the layout file. LAYOUT_FILE = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. QUIET = YES # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used. WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. 
WARN_IF_UNDOCUMENTED = YES # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. WARN_IF_DOC_ERROR = YES # This WARN_NO_PARAMDOC option can be enabled to get warnings for # functions that are documented, but have no documentation for their parameters # or return value. If set to NO (the default) doxygen will only warn about # wrong or incomplete parameter documentation, but not about the absence of # documentation. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text. Optionally the format may contain # $version, which will be replaced by the version of the file (if it could # be obtained via FILE_VERSION_FILTER) WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. If left blank the output is written # to stderr. WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = usual doc # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is # also the default input encoding. 
Doxygen uses libiconv (or the iconv built # into libc) for the transcoding. See http://www.gnu.org/software/libiconv for # the list of possible encodings. INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx # *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90 FILE_PATTERNS = *.h *.dox # The RECURSIVE tag can be used to specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. RECURSIVE = YES # The EXCLUDE tag can be used to specify files and/or directories that should # be excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or # directories that are symbolic links (a Unix filesystem feature) are excluded # from the input. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. Note that the wildcards are matched # against the file with absolute path, so to exclude all test directories # for example use the pattern */test/* EXCLUDE_PATTERNS = # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. 
Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test EXCLUDE_SYMBOLS = # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank all files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain images that are included in the documentation (see # the \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command <filter> <input-file>, where <filter> # is the value of the INPUT_FILTER tag, and <input-file> is the name of an # input file. Doxygen will then use the output that the filter program writes # to standard output. # If FILTER_PATTERNS is specified, this tag will be # ignored. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. # Doxygen will compare the file name with each pattern and apply the # filter if there is a match. # The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further # info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER # is applied to all files. 
FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). FILTER_SOURCE_FILES = NO #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. # Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C and C++ comments will always remain visible. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES # then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = YES # If the REFERENCES_RELATION tag is set to YES # then for each documented function all documented entities # called/used by that function will be listed. REFERENCES_RELATION = YES # If the REFERENCES_LINK_SOURCE tag is set to YES (the default) # and SOURCE_BROWSER tag is set to YES, then the hyperlinks from # functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will # link to the source code. # Otherwise they will link to the documentation. REFERENCES_LINK_SOURCE = YES # If the USE_HTAGS tag is set to YES then the references to source code # will point to the HTML generated by the htags(1) tool instead of doxygen # built-in source browser. 
The htags tool is part of GNU's global source # tagging system (see http://www.gnu.org/software/global/global.html). You # will need version 4.8.6 or higher. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. VERBATIM_HEADERS = NO #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. ALPHABETICAL_INDEX = NO # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. # The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. 
HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). If it is left blank # doxygen will generate files with .html extension. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. HTML_HEADER = # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If the tag is left blank doxygen # will generate a default style sheet. Note that doxygen will try to copy # the style sheet file to the HTML output directory, so don't put your own # stylesheet in the HTML output directory as well, or it will be erased! HTML_STYLESHEET = # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML # page will contain the date and time when the page was generated. Setting # this to NO can help when comparing the output of multiple runs. HTML_TIMESTAMP = NO # If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, # files or namespaces will be aligned in HTML using tables. If set to # NO a bullet list will be used. HTML_ALIGN_MEMBERS = YES # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. For this to work a browser that supports # JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox # Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari). 
HTML_DYNAMIC_SECTIONS = NO # If the GENERATE_DOCSET tag is set to YES, additional index files # will be generated that can be used as input for Apple's Xcode 3 # integrated development environment, introduced with OSX 10.5 (Leopard). # To create a documentation set, doxygen will generate a Makefile in the # HTML output directory. Running make will produce the docset in that # directory and running "make install" will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find # it at startup. # See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html for more information. GENERATE_DOCSET = NO # When GENERATE_DOCSET tag is set to YES, this tag determines the name of the # feed. A documentation feed provides an umbrella under which multiple # documentation sets from a single provider (such as a company or product suite) # can be grouped. DOCSET_FEEDNAME = "Doxygen generated docs" # When GENERATE_DOCSET tag is set to YES, this tag specifies a string that # should uniquely identify the documentation set bundle. This should be a # reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen # will append .docset to the name. DOCSET_BUNDLE_ID = org.doxygen.Project # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compiled HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. You # can add a path in front of the file if the result should not be # written to the html output directory. CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). 
If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). GENERATE_CHI = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING # is used to encode HtmlHelp index (hhk), content (hhc) and project file # content. CHM_INDEX_ENCODING = # If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag # controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. TOC_EXPAND = NO # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and QHP_VIRTUAL_FOLDER # are set, an additional index file will be generated that can be used as input for # Qt's qhelpgenerator to generate a Qt Compressed Help (.qch) of the generated # HTML documentation. GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can # be used to specify the file name of the resulting .qch file. # The path specified is relative to the HTML output folder. QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#namespace QHP_NAMESPACE = org.doxygen.Project # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#virtual-folders QHP_VIRTUAL_FOLDER = doc # If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to add. 
# For more information please see # http://doc.trolltech.com/qthelpproject.html#custom-filters QHP_CUST_FILTER_NAME = # The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the custom filter to add. For more information please see # Qt Help Project / Custom Filters. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this project's # filter section matches. # Qt Help Project / Filter Attributes. QHP_SECT_FILTER_ATTRS = # If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can # be used to specify the location of Qt's qhelpgenerator. # If non-empty doxygen will try to run qhelpgenerator on the generated # .qhp file. QHG_LOCATION = # If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files # will be generated, which together with the HTML files, form an Eclipse help # plugin. To install this plugin and make it available under the help contents # menu in Eclipse, the contents of the directory containing the HTML and XML # files needs to be copied into the plugins directory of eclipse. The name of # the directory within the plugins directory should be the same as # the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before the help appears. GENERATE_ECLIPSEHELP = NO # A unique identifier for the eclipse help plugin. When installing the plugin # the directory name containing the HTML and XML files should also have # this name. ECLIPSE_DOC_ID = org.doxygen.Project # The DISABLE_INDEX tag can be used to turn on/off the condensed index at # top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. DISABLE_INDEX = NO # This tag can be used to set the number of enum values (range [1..20]) # that doxygen will group on one line in the generated HTML documentation. ENUM_VALUES_PER_LINE = 4 # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. 
# If the tag value is set to YES, a side panel will be generated # containing a tree-like index structure (just like the one that # is generated for HTML Help). For this to work a browser that supports # JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). # Windows users are probably better off using the HTML help feature. GENERATE_TREEVIEW = NO # By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories, # and Class Hierarchy pages using a tree view instead of an ordered list. USE_INLINE_TREES = NO # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. TREEVIEW_WIDTH = 250 # Use this tag to change the font size of Latex formulas included # as images in the HTML documentation. The default is 10. Note that # when you change the font size after a successful doxygen run you need # to manually remove any form_*.png images from the HTML output directory # to force them to be regenerated. FORMULA_FONTSIZE = 10 # When the SEARCHENGINE tag is enabled doxygen will generate a search box for the HTML output. The underlying search engine uses javascript # and DHTML and should work on any modern browser. Note that when using HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) there is already a search function so this one should # typically be disabled. For large projects the javascript based search engine # can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution. SEARCHENGINE = NO # When the SERVER_BASED_SEARCH tag is enabled the search engine will be implemented using a PHP enabled web server instead of at the web client using Javascript. Doxygen will generate the search PHP script and index # file to put on the web server. The advantage of the server based approach is that it scales better to large projects and allows full text search. 
The disadvantage is that it is more difficult to setup # and does not have live searching capabilities. SERVER_BASED_SEARCH = NO #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. GENERATE_LATEX = NO # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `latex' will be used as the default path. LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. # Note that when enabling USE_PDFLATEX this option is only used for # generating bitmaps for formulas in the HTML output, but not in the # Makefile that is written to the output directory. LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to # generate index for LaTeX. If left blank `makeindex' will be used as the # default command name. MAKEINDEX_CMD_NAME = makeindex # If the COMPACT_LATEX tag is set to YES Doxygen generates more compact # LaTeX documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used # by the printer. Possible values are: a4, a4wide, letter, legal and # executive. If left blank a4wide will be used. PAPER_TYPE = a4wide # The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX # packages that should be included in the LaTeX output. EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. The header should contain everything until # the first chapter. 
If it is left blank doxygen will generate a # standard header. Notice: only use this tag if you know what you are doing! LATEX_HEADER = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated # is prepared for conversion to pdf (using ps2pdf). The pdf file will # contain links (just like the HTML output) instead of page references # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = NO # If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of # plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. USE_PDFLATEX = NO # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. # command to the generated LaTeX files. This will instruct LaTeX to keep # running if errors occur, instead of asking the user for help. # This option is also used when generating formulas in HTML. LATEX_BATCHMODE = NO # If LATEX_HIDE_INDICES is set to YES then doxygen will not # include the index chapters (such as File Index, Compound Index, etc.) # in the output. LATEX_HIDE_INDICES = NO # If LATEX_SOURCE_CODE is set to YES then doxygen will include source code with syntax highlighting in the LaTeX output. Note that which sources are shown also depends on other settings such as SOURCE_BROWSER. LATEX_SOURCE_CODE = NO #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output # The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. 
RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES Doxygen generates more compact # RTF documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated # will contain hyperlink fields. The RTF file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using WORD or other # programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO # Load stylesheet definitions from file. Syntax is similar to doxygen's # config file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an rtf document. # Syntax is similar to doxygen's config file. RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES (the default) Doxygen will # generate man pages GENERATE_MAN = NO # The MAN_OUTPUT tag is used to specify where the man pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .3 # If the MAN_LINKS tag is set to YES and Doxygen generates man output, # then it will generate one additional man file for each entity # documented in the real man page(s). These additional files # only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO. 
MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES Doxygen will # generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = NO # The XML_OUTPUT tag is used to specify where the XML pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the default path. XML_OUTPUT = xml # The XML_SCHEMA tag can be used to specify an XML schema, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_SCHEMA = # The XML_DTD tag can be used to specify an XML DTD, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_DTD = # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will # generate an AutoGen Definitions (see autogen.sf.net) file # that captures the structure of the code including all # documentation. Note that this feature is still experimental # and incomplete at the moment. 
GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES Doxygen will # generate a Perl module file that captures the structure of # the code including all documentation. Note that this # feature is still experimental and incomplete at the # moment. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES Doxygen will generate # the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be # nicely formatted so it can be parsed by a human reader. # This is useful # if you want to understand what is going on. # On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file # are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. # This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will # evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro # names in the source code. If set to NO (the default) only conditional # compilation will be performed. 
Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = NO # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES # then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_DEFINED tags. EXPAND_ONLY_PREDEF = NO # If the SEARCH_INCLUDES tag is set to YES (the default) the includes files # in the INCLUDE_PATH (see below) will be search if a #include is found. SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by # the preprocessor. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will # be used. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that # are defined before the preprocessor is started (similar to the -D option of # gcc). The argument of the tag is a list of macros of the form: name # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. To prevent a macro definition from being # undefined via #undef or recursively expanded use the := operator # instead of the = operator. PREDEFINED = # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. # The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition. EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all function-like macros that are alone # on a line, have an all uppercase name, and do not end with a semicolon. 
Such # function macros are typically used for boiler-plate code, and will confuse # the parser if not removed. SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- # The TAGFILES option can be used to specify one or more tagfiles. # Optionally an initial location of the external documentation # can be added for each tagfile. The format of a tag file without # this location is as follows: # # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # # TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths or # URLs. If a location is present for each tag, the installdox tool # does not have to be run to correct the links. # Note that each tag file must have a unique name # (where the name does NOT include the path) # If a tag file is not located in the directory in which doxygen # is run, you must also specify the path to the tagfile here. TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. GENERATE_TAGFILE = # If the ALLEXTERNALS tag is set to YES all external classes will be listed # in the class index. If set to NO only the inherited external classes # will be listed. ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed # in the modules index. If set to NO, only the current project's groups will # be listed. EXTERNAL_GROUPS = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). 
PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will # generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base # or super classes. Setting the tag to NO turns the diagrams off. Note that # this option is superseded by the HAVE_DOT option below. This is only a # fallback. It is recommended to install and use dot, since it yields more # powerful graphs. CLASS_DIAGRAMS = NO # You can define message sequence charts within doxygen comments using the \msc # command. Doxygen will then run the mscgen tool (see # http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the # documentation. The MSCGEN_PATH tag allows you to specify the directory where # the mscgen tool resides. If left empty the tool is assumed to be found in the # default search path. MSCGEN_PATH = # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default) HAVE_DOT = NO # By default doxygen will write a font called FreeSans.ttf to the output # directory and reference it in all dot files that doxygen generates. This # font does not include all possible unicode characters however, so when you need # these (or just want a differently looking font) you can specify the font name # using DOT_FONTNAME. 
You need need to make sure dot is able to find the font, # which can be done by putting it in a standard location or by setting the # DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory # containing the font. DOT_FONTNAME = FreeSans # The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. # The default size is 10pt. DOT_FONTSIZE = 10 # By default doxygen will tell dot to use the output directory to look for the # FreeSans.ttf font (which doxygen will put there itself). If you specify a # different font using DOT_FONTNAME you can set the path where dot # can find it using this tag. DOT_FONTPATH = # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. Setting this tag to YES will force the # the CLASS_DIAGRAMS tag to NO. CLASS_GRAPH = NO # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. COLLABORATION_GRAPH = NO # If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen # will generate a graph for groups, showing the direct groups dependencies GROUP_GRAPHS = NO # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. UML_LOOK = NO # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. 
INCLUDE_GRAPH = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH and HAVE_DOT options are set to YES then # doxygen will generate a call dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable call graphs # for selected functions only using the \callgraph command. CALL_GRAPH = NO # If the CALLER_GRAPH and HAVE_DOT tags are set to YES then # doxygen will generate a caller dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable caller # graphs for selected functions only using the \callergraph command. CALLER_GRAPH = NO # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will graphical hierarchy of all classes instead of a textual one. GRAPHICAL_HIERARCHY = NO # If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES # then doxygen will show the dependencies a directory has on other directories # in a graphical way. The dependency relations are determined by the #include # relations between the files in the directories. DIRECTORY_GRAPH = NO # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are png, jpg, or gif # If left blank png will be used. DOT_IMAGE_FORMAT = png # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. 
DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). DOTFILE_DIRS = # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of # nodes that will be shown in the graph. If the number of nodes in a graph # becomes larger than this value, doxygen will truncate the graph, which is # visualized by representing a node as a red box. Note that doxygen if the # number of direct children of the root node in a graph is already larger than # DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note # that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. DOT_GRAPH_MAX_NODES = 50 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot. A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes # that lay further from the root node will be omitted. Note that setting this # option to 1 or 2 may greatly reduce the computation time needed for large # code bases. Also note that the size of a graph can be further restricted by # DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, because dot on Windows does not # seem to support this out of the box. Warning: Depending on the platform used, # enabling this option may lead to badly anti-aliased labels on the edges of # a graph (i.e. they become hard to read). DOT_TRANSPARENT = NO # Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) # support this, this feature is disabled by default. 
DOT_MULTI_TARGETS = NO # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. DOT_CLEANUP = YES skytools-3.2.6/lib/doc/setup.dox0000644000000000000000000000006712166266754013510 0ustar /** * @page setup How to setup libusual * * Foo */ skytools-3.2.6/lib/build_msvc.mk0000644000000000000000000000043512166266754013546 0ustar AM_FEATURES = msvc # make it work also when included from test/Makefile top_srcdir = $(dir $(filter %build.mk, $(MAKEFILE_LIST))) top_builddir = $(top_srcdir) abs_top_srcdir := $(abspath $(top_srcdir)) abs_top_builddir := $(abs_top_srcdir) include $(abs_top_srcdir)/mk/antimake.mk skytools-3.2.6/lib/config.mak.in0000644000000000000000000000232112166266754013426 0ustar PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PORTNAME = @PORTNAME@ EXEEXT = @EXEEXT@ HAVE_CC_DEPFLAG = @HAVE_CC_DEPFLAG@ CC = @CC@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CFLAGS = @CFLAGS@ DEFS = @DEFS@ WFLAGS = @WFLAGS@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBS = @LIBS@ AR = @AR@ ARFLAGS = @ARFLAGS@ RANLIB = @RANLIB@ LIBTOOL = @LIBTOOL@ SHELL = @SHELL@ INSTALL = @INSTALL@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_DATA = @INSTALL_DATA@ MKDIR_P = @MKDIR_P@ SED = @SED@ AWK = @AWK@ GREP = @GREP@ EGREP = @EGREP@ STRIP = @STRIP@ prefix = @prefix@ exec_prefix = @exec_prefix@ bindir = @bindir@ includedir = @includedir@ sbindir = @sbindir@ libexecdir = @libexecdir@ datarootdir = @datarootdir@ datadir = @datadir@ sysconfdir = @sysconfdir@ docdir = @docdir@ mandir = @mandir@ libdir = @libdir@ localedir = @localedir@ 
pkgdatadir = @pkgdatadir@ pkgconfigdir = @pkgconfigdir@ abs_top_srcdir ?= @abs_top_srcdir@ abs_top_builddir ?= @abs_top_builddir@ nosub_top_srcdir ?= @top_srcdir@ nosub_top_builddir ?= @top_builddir@ enable_debug = @enable_debug@ skytools-3.2.6/lib/README0000644000000000000000000000011212166266754011736 0ustar = libusual = Collection of various code useful for writing server code. skytools-3.2.6/lib/Makefile0000644000000000000000000001041712166266754012527 0ustar AM_CPPFLAGS = -I$(builddir) -I$(srcdir) # main target lib_LIBRARIES = libusual.a # sources that are not always built EXTRA_libusual_a_SOURCES = usual/pgsocket.h usual/pgsocket.c # sources not in tar.gz nodist_EXTRA_libusual_a_SOURCES = usual/config.h # regular source files libusual_a_SOURCES = usual/config.h.in \ usual/aatree.h usual/aatree.c \ usual/base.h usual/base.c usual/base_win32.h \ usual/bits.h \ usual/cbtree.h usual/cbtree.c \ usual/cfparser.h usual/cfparser.c \ usual/config_msvc.h \ usual/crypto/digest.h usual/crypto/digest.c \ usual/crypto/hmac.h usual/crypto/hmac.c \ usual/crypto/keccak.h usual/crypto/keccak.c \ usual/crypto/md5.h usual/crypto/md5.c \ usual/crypto/sha1.h usual/crypto/sha1.c \ usual/crypto/sha256.h usual/crypto/sha256.c \ usual/crypto/sha512.h usual/crypto/sha512.c \ usual/ctype.h \ usual/cxalloc.h usual/cxalloc.c \ usual/cxextra.h usual/cxextra.c \ usual/daemon.h usual/daemon.c \ usual/endian.h \ usual/err.h usual/err.c \ usual/event.h usual/event.c \ usual/fileutil.h usual/fileutil.c \ usual/fnmatch.h usual/fnmatch.c \ usual/getopt.h usual/getopt.c \ usual/hashing/crc32.h usual/hashing/crc32.c \ usual/hashing/lookup3.h usual/hashing/lookup3.c \ usual/hashing/siphash.h usual/hashing/siphash.c \ usual/hashtab-impl.h \ usual/heap.h usual/heap.c \ usual/list.h usual/list.c \ usual/logging.h usual/logging.c \ usual/mbuf.h usual/mbuf.c \ usual/mdict.h usual/mdict.c \ usual/mempool.h usual/mempool.c \ usual/misc.h \ usual/netdb.h usual/netdb.c \ usual/pgutil.h usual/pgutil.c 
usual/pgutil_kwlookup.h \ usual/pthread.h usual/pthread.c \ usual/regex.h usual/regex.c \ usual/safeio.h usual/safeio.c \ usual/shlist.h \ usual/signal.h usual/signal.c \ usual/slab.h usual/slab.c \ usual/socket.h usual/socket.c usual/socket_ntop.c usual/socket_pton.c usual/socket_win32.h \ usual/statlist.h \ usual/string.h usual/string.c \ usual/strpool.h usual/strpool.c \ usual/time.h usual/time.c \ usual/utf8.h usual/utf8.c \ usual/wchar.h usual/wchar.c # we want to filter headers, so cannot use usual install method via _HEADERS USUAL_HEADERS = $(filter %.h,$(libusual_a_SOURCES) $(nodist_EXTRA_libusual_a_SOURCES)) # define aclocal destination aclocaldir = ${datarootdir}/aclocal AM_DESTINATIONS = aclocal # other files dist_pkgdata_SCRIPTS = find_modules.sh dist_aclocal_DATA = m4/usual.m4 m4/antimake.m4 # test program for link-test noinst_PROGRAMS = test/compile test_compile_SOURCES = test/compile.c test_compile_LDADD = libusual.a # extra clean files DISTCLEANFILES = config.log build.mk config.status libtool config.mak MAINTAINERCLEANFILES = build.mk.in configure install-sh ltmain.sh config.sub config.guess # files for .tgz that are not mentioned in sources EXTRA_DIST = $(MAINTAINERCLEANFILES) # we dont build test subdir by default, but want to include in .tgz DIST_SUBDIRS = test # non-recursive subdir EMBED_SUBDIRS = mk # # Launch Antimake # include build.mk # filter headers when installing install-local: @$(MKDIR_P) $(DESTDIR)$(includedir)/usual @$(MKDIR_P) $(DESTDIR)$(includedir)/usual/hashing @$(MKDIR_P) $(DESTDIR)$(includedir)/usual/crypto @for hdr in $(USUAL_HEADERS); do \ echo Filtering $$hdr; \ $(SED) -f mk/safe-headers.sed $$hdr \ > $(DESTDIR)$(includedir)/$$hdr; \ done # Give proper error message build.mk: @echo "Please run ./configure first" @exit 1 %.pc: %.pc.in config.status ./config.status --file $@ # run sparse over code sparse: config.mak REAL_CC="$(CC)" \ $(MAKE) clean libusual.a CC="cgcc -Wsparse-all -Wno-transparent-union" # generate api 
documentation dox: rm -rf doc/html/mk #rm -rf mk/temos/html doxygen doc/Doxyfile $(MAKE) -C mk/temos html cp -rp mk/temos/html doc/html/mk # # rest is for pgutil_kwlookup generation # PG_CONFIG ?= pg_config KWLIST = $(shell $(PG_CONFIG) --includedir-server)/parser/kwlist.h GPERF = gperf -m5 # requires 8.4+ kws: @test -f "$(KWLIST)" || { echo "kwlist.h not found"; exit 1; } grep '^PG_KEYWORD' "$(KWLIST)" \ | grep -v UNRESERVED \ | sed 's/.*"\(.*\)",.*, *\(.*\)[)].*/\1/' \ >> usual/pgutil_kwlookup.gp kwh: $(GPERF) usual/pgutil_kwlookup.g \ | sed '/^#line/d' \ > usual/pgutil_kwlookup.h sizes: all size `find .objs -name '.libs' -prune -o -name '*.o' -print | sort` %.s: %.c $(CC) -S $(DEFS) $(CFLAGS) $(CPPFLAGS) -I. $< -o - | cleanasm > $@ skytools-3.2.6/lib/test/0000755000000000000000000000000012166266754012043 5ustar skytools-3.2.6/lib/test/test_regex.c0000644000000000000000000000532012166266754014360 0ustar #include #include #include "test_common.h" #define NMATCH 20 /* * quick regex sanity check */ /* execute basic regex and return result as string */ static const char *b_rx(const char *regex, const char *str, int flags) { static char buf[512]; regex_t rx; regmatch_t matches[NMATCH]; int nmatch, err, i; char *dst = buf; memset(&rx, 0, sizeof(rx)); memset(matches, -1, sizeof(&matches)); /* compile */ err = regcomp(&rx, regex, flags); if (err) goto fail; nmatch = rx.re_nsub; /* match */ err = regexec(&rx, str, NMATCH, matches, 0); if (err) goto fail; /* format result */ for (i = 0; i < nmatch + 1; i++) { regmatch_t *m = &matches[i]; *dst++ = '('; if (m->rm_so >= 0) dst += sprintf(dst, "%d", (int)m->rm_so); else *dst++ = '?'; *dst++ = ','; if (m->rm_eo >= 0) dst += sprintf(dst, "%d", (int)m->rm_eo); else *dst++ = '?'; *dst++ = ')'; } regfree(&rx); return buf; fail: /* format error */ regfree(&rx); switch (err) { case REG_NOMATCH: return "NOMATCH"; case REG_BADBR: return "BADBR"; case REG_BADPAT: return "BADPAT"; case REG_BADRPT: return "BADRPT"; case REG_EBRACE: return 
"EBRACE"; case REG_EBRACK: return "EBRACK"; case REG_ECOLLATE: return "ECOLLATE"; case REG_ECTYPE: return "ECTYPE"; #ifdef REG_EEND case REG_EEND: return "EEND"; #endif case REG_EESCAPE: return "EESCAPE"; case REG_EPAREN: return "EPAREN"; case REG_ERANGE: return "ERANGE"; #ifdef REG_ESIZE case REG_ESIZE: return "ESIZE"; #endif case REG_ESPACE: return "ESPACE"; case REG_ESUBREG: return "ESUBREG"; #ifdef REG_ENOSYS case REG_ENOSYS: return "ENOSYS"; #endif #ifdef REG_EMPTY case REG_EMPTY: return "EMPTY"; #endif default: return "UNKNOWN_ERROR"; } } /* execute extended regex and return result as string */ static const char *e_rx(const char *regex, const char *str, int flags) { return b_rx(regex, str, flags | REG_EXTENDED); } static void test_regex(void *ptr) { str_check(e_rx("foo*", "foobar", 0), "(0,3)"); str_check(e_rx("foo(x)?.*", "foobar", 0), "(0,6)(?,?)"); str_check(e_rx("foo", "bar", 0), "NOMATCH"); str_check(e_rx("foo{5,1}", "bar", 0), "BADBR"); /* str_check(e_rx("(|)", "bar", 0), "BADPAT"); */ str_check(e_rx("*", "bar", 0), "BADRPT"); str_check(e_rx("foo{", "bar", 0), "EBRACE"); str_check(e_rx("fo[o", "bar", 0), "EBRACK"); str_check(e_rx("[[:foo:]]", "bar", 0), "ECTYPE"); str_check(e_rx("foo\\", "foobar", 0), "EESCAPE"); str_check(e_rx("fo(o", "bar", 0), "EPAREN"); str_check(e_rx("[a-b-c]", "bar", 0), "ERANGE"); str_check(b_rx("(\\1)", "bar", 0), "ESUBREG"); str_check(e_rx("[[:random:]]", "bar", 0), "ECTYPE"); end:; } /* * Describe */ struct testcase_t regex_tests[] = { { "minimal", test_regex }, END_OF_TESTCASES }; skytools-3.2.6/lib/test/tinytest.h0000644000000000000000000000751412166266754014106 0ustar /* tinytest.h -- Copyright 2009 Nick Mathewson * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef _TINYTEST_H #define _TINYTEST_H /** Flag for a test that needs to run in a subprocess. */ #define TT_FORK (1<<0) /** Runtime flag for a test we've decided to skip. */ #define TT_SKIP (1<<1) /** Internal runtime flag for a test we've decided to run. */ #define _TT_ENABLED (1<<2) /** If you add your own flags, make them start at this point. */ #define TT_FIRST_USER_FLAG (1<<3) typedef void (*testcase_fn)(void *); struct testcase_t; /** Functions to initialize/teardown a structure for a testcase. */ struct testcase_setup_t { /** Return a new structure for use by a given testcase. */ void *(*setup_fn)(const struct testcase_t *); /** Clean/free a structure from setup_fn. Return 1 if ok, 0 on err. */ int (*cleanup_fn)(const struct testcase_t *, void *); }; /** A single test-case that you can run. */ struct testcase_t { const char *name; /**< An identifier for this case. 
*/ testcase_fn fn; /**< The function to run to implement this case. */ unsigned long flags; /**< Bitfield of TT_* flags. */ const struct testcase_setup_t *setup; /**< Optional setup/cleanup fns*/ void *setup_data; /**< Extra data usable by setup function */ }; #define END_OF_TESTCASES { NULL, NULL, 0, NULL, NULL } /** A group of tests that are selectable together. */ struct testgroup_t { const char *prefix; /**< Prefix to prepend to testnames. */ struct testcase_t *cases; /** Array, ending with END_OF_TESTCASES */ }; #define END_OF_GROUPS { NULL, NULL} /** Implementation: called from a test to indicate failure, before logging. */ void _tinytest_set_test_failed(void); /** Implementation: called from a test to indicate that we're skipping. */ void _tinytest_set_test_skipped(void); /** Implementation: return 0 for quiet, 1 for normal, 2 for loud. */ int _tinytest_get_verbosity(void); /** Implementation: Set a flag on tests matching a name; returns number * of tests that matched. */ int _tinytest_set_flag(struct testgroup_t *, const char *, unsigned long); /** Set all tests in 'groups' matching the name 'named' to be skipped. */ #define tinytest_skip(groups, named) \ _tinytest_set_flag(groups, named, TT_SKIP) /** Run a single testcase in a single group. */ int testcase_run_one(const struct testgroup_t *,const struct testcase_t *); /** Run a set of testcases from an END_OF_GROUPS-terminated array of groups, as selected from the command line. */ int tinytest_main(int argc, const char **argv, struct testgroup_t *groups); #endif skytools-3.2.6/lib/test/tinytest_macros.h0000644000000000000000000001207212166266754015445 0ustar /* tinytest_macros.h -- Copyright 2009 Nick Mathewson * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef _TINYTEST_MACROS_H #define _TINYTEST_MACROS_H /* Helpers for defining statement-like macros */ #define TT_STMT_BEGIN do { #define TT_STMT_END } while(0) /* Redefine this if your test functions want to abort with something besides * "goto end;" */ #ifndef TT_EXIT_TEST_FUNCTION #define TT_EXIT_TEST_FUNCTION TT_STMT_BEGIN goto end; TT_STMT_END #endif /* Redefine this if you want to note success/failure in some different way. */ #ifndef TT_DECLARE #define TT_DECLARE(prefix, args) \ TT_STMT_BEGIN \ printf("\n %s %s:%d: ",prefix,__FILE__,__LINE__); \ printf args ; \ TT_STMT_END #endif /* Announce a failure. Args are parenthesized printf args. */ #define TT_GRIPE(args) TT_DECLARE("FAIL", args) /* Announce a non-failure if we're verbose. 
*/ #define TT_BLATHER(args) \ TT_STMT_BEGIN \ if (_tinytest_get_verbosity()>1) TT_DECLARE(" OK", args); \ TT_STMT_END #define TT_DIE(args) \ TT_STMT_BEGIN \ _tinytest_set_test_failed(); \ TT_GRIPE(args); \ TT_EXIT_TEST_FUNCTION; \ TT_STMT_END #define TT_FAIL(args) \ TT_STMT_BEGIN \ _tinytest_set_test_failed(); \ TT_GRIPE(args); \ TT_STMT_END /* Fail and abort the current test for the reason in msg */ #define tt_abort_printf(msg) TT_DIE(msg) #define tt_abort_perror(op) TT_DIE(("%s: %s [%d]",(op),strerror(errno), errno)) #define tt_abort_msg(msg) TT_DIE(("%s", msg)) #define tt_abort() TT_DIE(("%s", "(Failed.)")) /* Fail but do not abort the current test for the reason in msg. */ #define tt_fail_printf(msg) TT_FAIL(msg) #define tt_fail_perror(op) TT_FAIL(("%s: %s [%d]",(op),strerror(errno), errno)) #define tt_fail_msg(msg) TT_FAIL(("%s", msg)) #define tt_fail() TT_FAIL(("%s", "(Failed.)")) /* End the current test, and indicate we are skipping it. */ #define tt_skip() \ TT_STMT_BEGIN \ _tinytest_set_test_skipped(); \ TT_EXIT_TEST_FUNCTION; \ TT_STMT_END #define _tt_want(b, msg, fail) \ TT_STMT_BEGIN \ if (!(b)) { \ _tinytest_set_test_failed(); \ TT_GRIPE((msg)); \ fail; \ } else { \ TT_BLATHER((msg)); \ } \ TT_STMT_END /* Assert b, but do not stop the test if b fails. Log msg on failure. */ #define tt_want_msg(b, msg) \ _tt_want(b, msg, ); /* Assert b and stop the test if b fails. Log msg on failure. */ #define tt_assert_msg(b, msg) \ _tt_want(b, msg, TT_EXIT_TEST_FUNCTION); /* Assert b, but do not stop the test if b fails. */ #define tt_want(b) tt_want_msg( (b), "want("#b")") /* Assert b, and stop the test if b fails. 
*/ #define tt_assert(b) tt_assert_msg((b), "assert("#b")") #define tt_assert_test_type(a,b,str_test,type,test,fmt) \ TT_STMT_BEGIN \ type _val1 = (type)(a); \ type _val2 = (type)(b); \ if (!(test)) { \ TT_DIE(("assert(%s): "fmt" vs "fmt, \ str_test, _val1, _val2)); \ } else { \ TT_BLATHER(("assert(%s): "fmt" vs "fmt, \ str_test, _val1, _val2)); \ } \ TT_STMT_END /* Helper: assert that a op b, when cast to type. Format the values with * printf format fmt on failure. */ #define tt_assert_op_type(a,op,b,type,fmt) \ tt_assert_test_type(a,b,#a" "#op" "#b,type,(_val1 op _val2),fmt) #define tt_int_op(a,op,b) \ tt_assert_test_type(a,b,#a" "#op" "#b,long,(_val1 op _val2),"%ld") #define tt_uint_op(a,op,b) \ tt_assert_test_type(a,b,#a" "#op" "#b,unsigned long, \ (_val1 op _val2),"%lu") #define tt_ptr_op(a,op,b) \ tt_assert_test_type(a,b,#a" "#op" "#b,void*, \ (_val1 op _val2),"%p") #define tt_str_op(a,op,b) \ tt_assert_test_type(a,b,#a" "#op" "#b,const char *, \ (strcmp(_val1,_val2) op 0),"<%s>") #endif skytools-3.2.6/lib/test/test_fileutil.c0000644000000000000000000000256112166266754015067 0ustar #include #include #include #include "test_common.h" /* * LN1 = 4*8 * LN2 = 8*4*8 * LN3 = 8*8*4*8 */ #define LN1 "11112222333344445555666677778888" #define LN2 LN1 LN1 LN1 LN1 LN1 LN1 LN1 LN1 #define LN3 LN2 LN2 LN2 LN2 LN2 LN2 LN2 LN2 static const char fdata[] = "1\n" "line 2\n" "\n" LN3 "noln"; static const char filename[] = "test_fileutil.tmp"; static bool createfile(void) { FILE *f = fopen(filename, "wb+"); if (!f) return false; fwrite(fdata, 1, strlen(fdata), f); fclose(f); return true; } static void test_fsize(void *p) { int_check(createfile(), 1); tt_assert(file_size(filename) == (int)strlen(fdata)); tt_assert(file_size(filename) == (int)sizeof(fdata) - 1); tt_assert(file_size("nonexist") == -1); end:; } static bool addln(void *arg, const char *ln, ssize_t len) { struct MBuf *buf = arg; int xlen = len; if (len < 0) return false; if (len > 0 && ln[len - 1] == '\n') xlen--; if 
(memchr(ln, '\n', xlen)) return false; return mbuf_write(buf, ln, len); } static void test_getline(void *p) { struct MBuf buf; mbuf_init_dynamic(&buf); tt_assert(foreach_line(filename, addln, &buf)); tt_assert(mbuf_write_byte(&buf, 0)); end: unlink(filename); mbuf_free(&buf); } struct testcase_t fileutil_tests[] = { { "file_size", test_fsize }, { "getline", test_getline }, END_OF_TESTCASES }; skytools-3.2.6/lib/test/test_mdict.c0000644000000000000000000000252012166266754014345 0ustar #include #include #include "test_common.h" static const char *xget(struct MDict *d, const char *k) { const char *val = mdict_get(d, k); return val ? val : "NULL"; } static void test_mdict(void *p) { struct MDict *d; struct MBuf buf; const char *s; d = mdict_new(USUAL_ALLOC); str_check(xget(d, "key"), "NULL"); int_check(mdict_put(d, "key", "val"), 1); int_check(mdict_put(d, "key2", "foo"), 1); int_check(mdict_put(d, "key2", ""), 1); int_check(mdict_put(d, "key3", NULL), 1); int_check(mdict_put(d, "key4", "v1"), 1); int_check(mdict_del(d, "key4"), 1); str_check(xget(d, "key"), "val"); str_check(xget(d, "key2"), ""); str_check(xget(d, "key3"), "NULL"); str_check(xget(d, "key4"), "NULL"); str_check(xget(d, "key5"), "NULL"); int_check(mdict_del(d, "key5"), 0); mbuf_init_dynamic(&buf); int_check(mdict_urlencode(d, &buf), 1); int_check(mbuf_write_byte(&buf, 0), 1); str_check(mbuf_data(&buf), "key=val&key2=&key3"); mbuf_free(&buf); mdict_free(d); d = mdict_new(USUAL_ALLOC); s = "key=val&key2=&key3"; int_check(mdict_urldecode(d, s, strlen(s)), 1); str_check(xget(d, "key"), "val"); str_check(xget(d, "key2"), ""); str_check(xget(d, "key3"), "NULL"); mdict_free(d); end:; } /* * Describe */ struct testcase_t mdict_tests[] = { { "basic", test_mdict }, END_OF_TESTCASES }; skytools-3.2.6/lib/test/test_strpool.c0000644000000000000000000000222112166266754014745 0ustar #include #include "test_common.h" #include static void test_strpool(void *p) { struct StrPool *pool; struct PStr *s; pool = 
strpool_create(USUAL_ALLOC); tt_assert(pool); strpool_free(pool); pool = strpool_create(USUAL_ALLOC); tt_assert(pool); int_check(strpool_total(pool), 0); s = strpool_get(pool, "foo", -1); str_check(s->str, "foo"); int_check(s->refcnt, 1); int_check(s->len, 3); int_check(strpool_total(pool), 1); tt_assert(s == strpool_get(pool, "fooTAIL", 3)); int_check(s->refcnt, 2); int_check(strpool_total(pool), 1); strpool_incref(s); int_check(s->refcnt, 3); strpool_decref(s); int_check(s->refcnt, 2); strpool_decref(s); int_check(s->refcnt, 1); int_check(strpool_total(pool), 1); strpool_decref(s); int_check(strpool_total(pool), 0); strpool_free(pool); /* free strc with strings */ pool = strpool_create(USUAL_ALLOC); tt_assert(pool); s = strpool_get(pool, "foo", -1); s = strpool_get(pool, "bar", 3); int_check(strpool_total(pool), 2); strpool_free(pool); end:; } /* * Describe */ struct testcase_t strpool_tests[] = { { "strpool", test_strpool }, END_OF_TESTCASES }; skytools-3.2.6/lib/test/test_cbtree.c0000644000000000000000000000622612166266754014520 0ustar #include #include #include "test_common.h" static char *OK = "OK"; struct MyNode { char str[64]; int len; }; static unsigned int my_getkey(void *ctx, void *obj, const void **dst_p) { struct MyNode *node = obj; *dst_p = node->str; return node->len; } static struct MyNode *make_node(int value) { struct MyNode *node = malloc(sizeof(*node)); memset(node, 0, sizeof(*node)); snprintf(node->str, sizeof(node->str), "%d", value); node->len = strlen(node->str); return node; } static bool my_node_free(void *ctx, void *obj) { free(obj); return true; } /* * Test tree sanity */ /* * checking operations */ static const char *my_search(struct CBTree *tree, int value) { struct AANode *res; char buf[64]; snprintf(buf, sizeof(buf), "%d", value); res = cbtree_lookup(tree, buf, strlen(buf)); return res ? 
OK : "not found"; } static const char *my_insert(struct CBTree *tree, int value) { struct MyNode *my = make_node(value); if (!cbtree_insert(tree, my)) return "insert failed"; return my_search(tree, value); } static const char *my_remove(struct CBTree *tree, int value) { struct MyNode *my; char buf[64]; snprintf(buf, sizeof(buf), "%d", value); my = cbtree_lookup(tree, buf, strlen(buf)); if (!my) return "nonexsist element"; cbtree_delete(tree, buf, strlen(buf)); if (cbtree_lookup(tree, buf, strlen(buf)) != NULL) return "still found"; return OK; } /* * Simple opeartions. */ static void test_cbtree_basic(void *p) { struct CBTree *tree; int i; tree = cbtree_create(my_getkey, my_node_free, NULL, USUAL_ALLOC); str_check(my_search(tree, 1), "not found"); for (i = 0; i < 15; i++) { str_check(my_insert(tree, i), "OK"); } for (i = -1; i > -15; i--) { str_check(my_insert(tree, i), "OK"); } for (i = 30; i < 45; i++) { str_check(my_insert(tree, i), "OK"); } for (i = 15; i < 30; i++) { str_check(my_insert(tree, i), "OK"); } for (i = -14; i < 45; i++) { str_check(my_remove(tree, i), "OK"); } end: cbtree_destroy(tree); } /* * randomized test */ #define RSIZE 3000 static int get_next(bool with_stat, bool added[]) { int r = random() % RSIZE; int i = r; while (1) { if (added[i] == with_stat) return i; if (++i >= RSIZE) i = 0; if (i == r) return -1; } } static void test_cbtree_random(void *p) { bool is_added[RSIZE]; int prefer_remove = 0; /* 0 - insert, 1 - delete */ int n; int op; /* 0 - insert, 1 - delete */ struct CBTree *tree; unsigned long long total = 0; srandom(123123); memset(is_added, 0, sizeof(is_added)); tree = cbtree_create(my_getkey, my_node_free, NULL, USUAL_ALLOC); while (total < 20000) { int r = random() & 15; if (prefer_remove) op = r > 5; else op = r > 10; /* op = 0; */ n = get_next(op, is_added); if (n < 0) { if (prefer_remove == op) { prefer_remove = !prefer_remove; } continue; } if (op == 0) { str_check(my_insert(tree, n), "OK"); is_added[n] = 1; } else { 
str_check(my_remove(tree, n), "OK"); is_added[n] = 0; } total++; } end: cbtree_destroy(tree); } struct testcase_t cbtree_tests[] = { { "basic", test_cbtree_basic }, { "random", test_cbtree_random }, END_OF_TESTCASES }; skytools-3.2.6/lib/test/test_base.c0000644000000000000000000000275512166266754014171 0ustar #include #include "test_common.h" #include struct somestruct { char a, b, c; }; static void test_ptr(void *p) { /* offsetof */ int_check(offsetof(struct somestruct, a), 0); int_check(offsetof(struct somestruct, b), 1); int_check(offsetof(struct somestruct, c), 2); /* container_of */ { struct somestruct s = {'a', 'b', 'c'}; char *pa = &s.a; char *pb = &s.b; char *pc = &s.c; struct somestruct *sa, *sb, *sc; sa = container_of(pa, struct somestruct, a); sb = container_of(pb, struct somestruct, b); sc = container_of(pc, struct somestruct, c); int_check(sa->a, 'a'); int_check(sb->b, 'b'); int_check(sc->c, 'c'); } /* alignof */ int_check(alignof(char), 1); int_check(alignof(short), 2); int_check(alignof(int), 4); /* CUSTOM_ALIGN */ int_check(CUSTOM_ALIGN(1, 4), 4); int_check(CUSTOM_ALIGN(2, 4), 4); int_check(CUSTOM_ALIGN(3, 4), 4); int_check(CUSTOM_ALIGN(4, 4), 4); int_check(CUSTOM_ALIGN(5, 4), 8); end:; } #ifdef _PACKED struct packed { char a; int b; char c; short d; } _PACKED; #endif static void test_misc(void *_p) { int i_4[4]; int i_2[2]; short s_4[4]; short s_2[2]; int_check(ARRAY_NELEM(i_4), 4); int_check(ARRAY_NELEM(i_2), 2); int_check(ARRAY_NELEM(s_4), 4); int_check(ARRAY_NELEM(s_2), 2); int_check(strcmp(__func__, "test_misc"), 0); #ifdef _PACKED int_check(sizeof(struct packed), 8); #endif end:; } struct testcase_t base_tests[] = { { "ptr", test_ptr }, { "misc", test_misc }, END_OF_TESTCASES }; skytools-3.2.6/lib/test/test_cxalloc.c0000644000000000000000000000401612166266754014674 0ustar #include "test_common.h" #include #include #include static int delta = 0; static char logbuf[1024]; static void reset(void) { logbuf[0] = 0; } static void m_log(const char 
*fmt, ...) { size_t len = strlen(logbuf); va_list ap; if (len && len < sizeof(logbuf) - 1) logbuf[len++] = ' '; va_start(ap, fmt); vsnprintf(logbuf + len, sizeof(logbuf) - len, fmt, ap); va_end(ap); } static void *log_alloc(void *ctx, size_t len) { void *p; m_log("A(%d)", (int)len); delta += len; p = cx_alloc(ctx, len + 8); *(int*)p = len; return (char *)p + 8; } static void *log_realloc(void *ctx, void *ptr, size_t len) { char *p = (char *)ptr - 8; int olen = *(int*)p; m_log("R(%d)", (int)len); p = cx_realloc(ctx, p, len + 8); *(int*)p = len; delta += len - olen; return p + 8; } static void log_free(void *ctx, const void *ptr) { char *p = (char *)ptr - 8; int len = *(int*)p; delta -= len; m_log("F(%d)", len); cx_free(ctx, p); } static const struct CxOps log_ops = { log_alloc, log_realloc, log_free, }; static const struct CxMem log_libc = { &log_ops, (void*)&cx_libc_allocator, }; #define log_check(x) str_check(logbuf, x); reset(); static void test_cxalloc_basic(void *zzz) { CxMem *cx = &log_libc; void *p; delta = 0; p = cx_alloc(cx, 16); log_check("A(16)") p = cx_realloc(cx, p, 500); log_check("R(500)") cx_free(cx, p); log_check("F(500)"); int_check(delta, 0); end: reset(); } static void test_cxalloc_tree(void *zzz) { CxMem *cx1, *cx2; void *p; delta = 0; cx1 = cx_new_tree(&log_libc); p = cx_alloc(cx1, 16); p = cx_realloc(cx1, p, 500); p = cx_realloc(cx1, p, 1500); p = cx_alloc(cx1, 55); cx_free(cx1, p); cx2 = cx_new_tree(cx1); p = cx_realloc(cx2, NULL, 2500); cx2 = cx_new_tree(cx2); p = cx_realloc(cx2, NULL, 3500); cx_destroy(cx1); /* str_check(logbuf, "A(16)R(500)F()"); */ int_check(delta, 0); end: reset(); } struct testcase_t cxalloc_tests[] = { { "basic", test_cxalloc_basic }, { "tree", test_cxalloc_tree }, END_OF_TESTCASES }; skytools-3.2.6/lib/test/test_endian.c0000644000000000000000000000455112166266754014511 0ustar #include #include "test_common.h" #include #include /* * bswap*() */ static void test_bswap(void *p) { int_check(bswap16(0xff01), 0x01ff); 
int_check(bswap32(0x01020304), 0x04030201); ull_check(bswap64(0x0102030405060708ULL), 0x0807060504030201ULL); end:; } /* * *enc(), *dec() */ static uint64_t tdecode(int t, ...) { uint8_t buf[16]; bool be = t > 0; va_list ap; uint64_t val = 777; int i; if (t < 0) t = -t; va_start(ap, t); memset(buf, 0xC1, sizeof(buf)); for (i = 0; i < t; i++) buf[i] = va_arg(ap, int); va_end(ap); if (be) { switch (t) { case 2: val = be16dec(buf); break; case 4: val = be32dec(buf); break; case 8: val = be64dec(buf); break; } } else { switch (t) { case 2: val = le16dec(buf); break; case 4: val = le32dec(buf); break; case 8: val = le64dec(buf); break; } } return val; } static const char *tencode(int t, uint64_t val) { static char res[64]; uint8_t buf[16]; bool be = t > 0; int i; if (t < 0) t = -t; memset(buf, 0xFC, sizeof(buf)); if (be) { switch (t) { case 2: be16enc(buf, val); break; case 4: be32enc(buf, val); break; case 8: be64enc(buf, val); break; } } else { switch (t) { case 2: le16enc(buf, val); break; case 4: le32enc(buf, val); break; case 8: le64enc(buf, val); break; } } for (i = t; i < (int)sizeof(buf); i++) { if (buf[i] != 0xFC) return "OVER"; } snprintf(res, sizeof(res), "%02X %02X %02X %02X %02X %02X %02X %02X ", buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]); res[t*3 - 1] = 0; return res; } static void test_encdec(void *p) { ull_check(tdecode( 2, 1,2), 0x0102); ull_check(tdecode(-2, 1,2), 0x0201); ull_check(tdecode( 4, 1,2,3,4), 0x01020304); ull_check(tdecode(-4, 1,2,3,4), 0x04030201); ull_check(tdecode( 8, 1,2,3,4,5,6,7,8), 0x0102030405060708); ull_check(tdecode(-8, 1,2,3,4,5,6,7,8), 0x0807060504030201); str_check(tencode( 2, 0x0102), "01 02"); str_check(tencode(-2, 0x0102), "02 01"); str_check(tencode( 4, 0x01020304), "01 02 03 04"); str_check(tencode(-4, 0x01020304), "04 03 02 01"); str_check(tencode( 8, 0x0102030405060708ULL), "01 02 03 04 05 06 07 08"); str_check(tencode(-8, 0x0102030405060708ULL), "08 07 06 05 04 03 02 01"); end:; } /* * Describe */ 
struct testcase_t endian_tests[] = { { "bswap", test_bswap }, { "encdec", test_encdec }, END_OF_TESTCASES }; skytools-3.2.6/lib/test/test_list.c0000644000000000000000000000567312166266754014234 0ustar #include "test_common.h" #include #include struct MyNode { struct List node; int val; int seq; }; static int my_cmp(const struct List *a, const struct List *b) { struct MyNode *aa, *bb; aa = container_of(a, struct MyNode, node); bb = container_of(b, struct MyNode, node); if (aa->val < bb->val) return -1; if (aa->val > bb->val) return 1; return 0; } static bool check_list(struct List *list, int n) { struct List *old, *cur; int i; old = NULL; i = 0; for (cur = list->next; cur != list; cur = cur->next) { i++; if (old) { struct MyNode *mcur, *mold; mcur = container_of(cur, struct MyNode, node); mold = container_of(old, struct MyNode, node); if (mold->val > mcur->val) { printf("bad order(%d): old.val=%d new.val=%d", n, mold->val, mcur->val); return false; } if (mold->val == mcur->val && mold->seq > mcur->seq) { printf("unstable(%d): old.seq=%d new.seq=%d", n, mold->seq, mcur->seq); return false; } if (cur->prev != old) { printf("llist err 2 (n=%d)", n); return false; } } else { if (cur->prev != list) { printf("llist err (n=%d)", n); return false; } } old = cur; } if (list->prev != ((old) ? 
old : list)) { printf("llist err 3 (n=%d)", n); return false; } if (i != n) { printf("llist err 3 (n=%d)", n); return false; } return true; } static bool test_sort(void (*sort)(struct List *list, list_cmp_f cmp), int n) { struct MemPool *pool = NULL; struct List list[1]; bool ok; int i; /* random */ list_init(list); for (i = 0; i < n; i++) { struct MyNode *e = mempool_alloc(&pool, sizeof(*e)); list_init(&e->node); e->val = random() % 100; e->seq = i; list_append(list, &e->node); } sort(list, my_cmp); ok = check_list(list, n); mempool_destroy(&pool); if (!ok) return false; /* seq */ list_init(list); for (i = 0; i < n; i++) { struct MyNode *e = mempool_alloc(&pool, sizeof(*e)); list_init(&e->node); e->val = i; e->seq = i; list_append(list, &e->node); } sort(list, my_cmp); ok = check_list(list, n); mempool_destroy(&pool); if (!ok) return false; /* reverse */ list_init(list); for (i = 0; i < n; i++) { struct MyNode *e = mempool_alloc(&pool, sizeof(*e)); list_init(&e->node); e->val = -i; e->seq = i; list_append(list, &e->node); } sort(list, my_cmp); ok = check_list(list, n); mempool_destroy(&pool); if (!ok) return false; return true; } static void test_list_sort(void *p) { int i; for (i = 0; i < 259; i++) tt_assert(test_sort(list_sort, i)); end:; } #if 0 static void test_list_sort2(void *p) { int i; for (i = 0; i < 259; i++) tt_assert(test_sort(list_sort2, i)); end:; } static void test_list_sort3(void *p) { int i; for (i = 0; i < 259; i++) tt_assert(test_sort(list_sort3, i)); end:; } #endif struct testcase_t list_tests[] = { { "sort", test_list_sort }, #if 0 { "sort2", test_list_sort2 }, { "sort3", test_list_sort3 }, #endif END_OF_TESTCASES }; skytools-3.2.6/lib/test/test_hashing.c0000644000000000000000000000347112166266754014674 0ustar #include #include #include #include "test_common.h" static uint32_t xcrc32(const char *s) { return calc_crc32(s, strlen(s), 0); } static uint32_t xlookup3(const char *s) { return hash_lookup3(s, strlen(s)); } static void test_crc32(void 
*p) { int_check(xcrc32(""), 0); int_check(xcrc32("a"), 3904355907); int_check(xcrc32("abc"), 891568578); int_check(xcrc32("message digest"), 538287487); int_check(xcrc32("abcdefghijklmnopqrstuvwxyz"), 1277644989); int_check(xcrc32("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"), 532866770); int_check(xcrc32("12345678901234567890123456789012345678901234567890123456789012345678901234567890"), 2091469426); end:; } static void test_lookup3(void *p) { #ifdef WORDS_BIGENDIAN int_check(xlookup3(""), 3735928559); int_check(xlookup3("a"), -454251968); int_check(xlookup3("abc"), -1186250080); int_check(xlookup3("message digest"), 670730672); int_check(xlookup3("abcdefghijklmnopqrstuvwxyz"), 251682059); int_check(xlookup3("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"), 567386262); int_check(xlookup3("12345678901234567890123456789012345678901234567890123456789012345678901234567890"), 312582506); #else int_check(xlookup3(""), 3735928559); int_check(xlookup3("a"), 1490454280); int_check(xlookup3("abc"), 238646833); int_check(xlookup3("message digest"), 2512672053); int_check(xlookup3("abcdefghijklmnopqrstuvwxyz"), 1966650813); int_check(xlookup3("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"), 3992286962); int_check(xlookup3("12345678901234567890123456789012345678901234567890123456789012345678901234567890"), 2776963519); #endif end:; } struct testcase_t hashing_tests[] = { { "crc32", test_crc32 }, { "lookup3", test_lookup3 }, END_OF_TESTCASES }; skytools-3.2.6/lib/test/test_time.c0000644000000000000000000000175212166266754014211 0ustar #include #include #include "test_common.h" static void test_get_time(void *p) { usec_t t, t2; usec_t ct, ct2; t = get_time_usec(); ct = get_cached_time(); usleep(USEC / 4); t2 = get_time_usec(); tt_assert(t + USEC / 4 <= t2); ct2 = get_cached_time(); tt_assert(ct2 == ct); reset_time_cache(); ct2 = get_cached_time(); tt_assert(ct2 != ct); end:; } static void test_time_format(void *p) { char 
buf[128]; usec_t t; #ifdef WIN32 tt_assert(_putenv("TZ=GMT") >= 0); _tzset(); printf( "_daylight = %d\n", _daylight ); printf( "_timezone = %ld\n", _timezone ); printf( "_tzname[0] = %s\n", _tzname[0] ); #else setenv("TZ", "GMT", 1); tzset(); #endif t = 1226059006841546; str_check(format_time_ms(t, buf, sizeof(buf)), "2008-11-07 11:56:46.841"); str_check(format_time_s(t, buf, sizeof(buf)), "2008-11-07 11:56:46"); end:; } struct testcase_t time_tests[] = { { "gettime", test_get_time }, { "format", test_time_format }, END_OF_TESTCASES }; skytools-3.2.6/lib/test/tinytest.c0000644000000000000000000002257112166266754014101 0ustar /* tinytest.c -- Copyright 2009 Nick Mathewson * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #ifdef WIN32 #include #include #else #include #include #include #endif #include #define evutil_snprintf snprintf #include "tinytest.h" #include "tinytest_macros.h" #define LONGEST_TEST_NAME 16384 static int in_tinytest_main = 0; /**< true if we're in tinytest_main().*/ static int n_ok = 0; /**< Number of tests that have passed */ static int n_bad = 0; /**< Number of tests that have failed. */ static int n_skipped = 0; /**< Number of tests that have been skipped. */ static int opt_forked = 0; /**< True iff we're called from inside a win32 fork*/ static int opt_nofork = 0; /**< Suppress calls to fork() for debugging. */ static int opt_verbosity = 1; /**< -==quiet,0==terse,1==normal,2==verbose */ const char *verbosity_flag = ""; enum outcome { SKIP=2, OK=1, FAIL=0 }; static enum outcome cur_test_outcome = 0; const char *cur_test_prefix = NULL; /**< prefix of the current test group */ /** Name of the current test, if we haven't logged is yet. Used for --quiet */ const char *cur_test_name = NULL; #ifdef WIN32 /** Pointer to argv[0] for win32. 
*/ static const char *commandname = NULL; #endif static enum outcome _testcase_run_bare(const struct testcase_t *testcase) { void *env = NULL; int outcome; if (testcase->setup) { env = testcase->setup->setup_fn(testcase); if (!env) return FAIL; else if (env == (void*)TT_SKIP) return SKIP; } cur_test_outcome = OK; testcase->fn(env); outcome = cur_test_outcome; if (testcase->setup) { if (testcase->setup->cleanup_fn(testcase, env) == 0) outcome = FAIL; } return outcome; } #define MAGIC_EXITCODE 42 static enum outcome _testcase_run_forked(const struct testgroup_t *group, const struct testcase_t *testcase) { #ifdef WIN32 /* Fork? On Win32? How primitive! We'll do what the smart kids do: we'll invoke our own exe (whose name we recall from the command line) with a command line that tells it to run just the test we want, and this time without forking. (No, threads aren't an option. The whole point of forking is to share no state between tests.) */ int ok; char buffer[LONGEST_TEST_NAME+256]; STARTUPINFO si; PROCESS_INFORMATION info; DWORD exitcode; if (!in_tinytest_main) { printf("\nERROR. 
On Windows, _testcase_run_forked must be" " called from within tinytest_main.\n"); abort(); } if (opt_verbosity>0) printf("[forking] "); evutil_snprintf(buffer, sizeof(buffer), "%s --RUNNING-FORKED %s %s%s", commandname, verbosity_flag, group->prefix, testcase->name); memset(&si, 0, sizeof(si)); memset(&info, 0, sizeof(info)); si.cb = sizeof(si); ok = CreateProcess(commandname, buffer, NULL, NULL, 0, 0, NULL, NULL, &si, &info); if (!ok) { printf("CreateProcess failed!\n"); return 0; } WaitForSingleObject(info.hProcess, INFINITE); GetExitCodeProcess(info.hProcess, &exitcode); CloseHandle(info.hProcess); CloseHandle(info.hThread); if (exitcode == 0) return OK; else if (exitcode == MAGIC_EXITCODE) return SKIP; else return FAIL; #else int outcome_pipe[2]; pid_t pid; (void)group; if (pipe(outcome_pipe)) perror("opening pipe"); if (opt_verbosity>0) printf("[forking] "); pid = fork(); if (!pid) { /* child. */ int test_r, write_r; char b[1]; close(outcome_pipe[0]); test_r = _testcase_run_bare(testcase); assert(0<=(int)test_r && (int)test_r<=2); b[0] = "NYS"[test_r]; write_r = write(outcome_pipe[1], b, 1); if (write_r != 1) { perror("write outcome to pipe"); exit(1); } exit(0); } else { /* parent */ int status, r; char b[1]; /* Close this now, so that if the other side closes it, * our read fails. */ close(outcome_pipe[1]); r = read(outcome_pipe[0], b, 1); if (r == 0) { printf("[Lost connection!] "); return 0; } else if (r != 1) { perror("read outcome from pipe"); } waitpid(pid, &status, 0); close(outcome_pipe[0]); return b[0]=='Y' ? OK : (b[0]=='S' ? 
SKIP : FAIL); } #endif } int testcase_run_one(const struct testgroup_t *group, const struct testcase_t *testcase) { enum outcome outcome; if (testcase->flags & TT_SKIP) { if (opt_verbosity>0) printf("%s%s: SKIPPED\n", group->prefix, testcase->name); ++n_skipped; return SKIP; } if (opt_verbosity>0 && !opt_forked) { printf("%s%s: ", group->prefix, testcase->name); } else { if (opt_verbosity==0) printf("."); cur_test_prefix = group->prefix; cur_test_name = testcase->name; } if ((testcase->flags & TT_FORK) && !(opt_forked||opt_nofork)) { outcome = _testcase_run_forked(group, testcase); } else { outcome = _testcase_run_bare(testcase); } if (outcome == OK) { ++n_ok; if (opt_verbosity>0 && !opt_forked) puts(opt_verbosity==1?"OK":""); } else if (outcome == SKIP) { ++n_skipped; if (opt_verbosity>0 && !opt_forked) puts("SKIPPED"); } else { ++n_bad; if (!opt_forked) printf("\n [%s FAILED]\n", testcase->name); } if (opt_forked) { exit(outcome==OK ? 0 : (outcome==SKIP?MAGIC_EXITCODE : 1)); } else { return (int)outcome; } } int _tinytest_set_flag(struct testgroup_t *groups, const char *arg, unsigned long flag) { int i, j; int length = LONGEST_TEST_NAME; char fullname[LONGEST_TEST_NAME]; int found=0; if (strstr(arg, "..")) length = strstr(arg,"..")-arg; for (i=0; groups[i].prefix; ++i) { for (j=0; groups[i].cases[j].name; ++j) { evutil_snprintf(fullname, sizeof(fullname), "%s%s", groups[i].prefix, groups[i].cases[j].name); if (!flag) /* Hack! 
*/ printf(" %s\n", fullname); if (!strncmp(fullname, arg, length)) { groups[i].cases[j].flags |= flag; ++found; } } } return found; } static void usage(struct testgroup_t *groups, int list_groups) { puts("Options are: [--verbose|--quiet|--terse] [--no-fork]"); puts(" Specify tests by name, or using a prefix ending with '..'"); puts(" Use --list-tests for a list of tests."); if (list_groups) { puts("Known tests are:"); _tinytest_set_flag(groups, "..", 0); } exit(0); } int tinytest_main(int c, const char **v, struct testgroup_t *groups) { int i, j, n=0; #ifdef WIN32 commandname = v[0]; #endif for (i=1; i= 1) printf("%d tests ok. (%d skipped)\n", n_ok, n_skipped); return (n_bad == 0) ? 0 : 1; } int _tinytest_get_verbosity(void) { return opt_verbosity; } void _tinytest_set_test_failed(void) { if (opt_verbosity <= 0 && cur_test_name) { if (opt_verbosity==0) puts(""); printf("%s%s: ", cur_test_prefix, cur_test_name); cur_test_name = NULL; } cur_test_outcome = 0; } void _tinytest_set_test_skipped(void) { if (cur_test_outcome==OK) cur_test_outcome = SKIP; } skytools-3.2.6/lib/test/test_string.c0000644000000000000000000001267212166266754014564 0ustar #include #include #ifdef HAVE_LIBGEN_H #include #endif #undef basename #undef dirname #include #include #include "test_common.h" /* * strlcpy */ static char *run_strlcpy(char *dst, const char *src, int size, int expres) { int res; strcpy(dst, "XXX"); res = strlcpy(dst, src, size); if (res != expres) return "FAIL"; return dst; } static void test_strlcpy(void *ptr) { char buf[128]; str_check(run_strlcpy(buf, "", 16, 0), ""); str_check(run_strlcpy(buf, "", 0, 0), "XXX"); str_check(run_strlcpy(buf, "", 16, 0), ""); str_check(run_strlcpy(buf, "abc", 16, 3), "abc"); str_check(run_strlcpy(buf, "abc", 4, 3), "abc"); str_check(run_strlcpy(buf, "abc", 3, 3), "ab"); str_check(run_strlcpy(buf, "abc", 2, 3), "a"); str_check(run_strlcpy(buf, "abc", 1, 3), ""); str_check(run_strlcpy(buf, "abc", 0, 3), "XXX"); end:; } /* * strlcat */ static 
char *run_strlcat(char *dst, const char *src, int size, int expres) { int res; strcpy(dst, "PFX"); res = strlcat(dst, src, size); if (res != expres) return "FAIL"; return dst; } static void test_strlcat(void *ptr) { char buf[128]; str_check(run_strlcat(buf, "", 16, 3), "PFX"); str_check(run_strlcat(buf, "abc", 16, 6), "PFXabc"); str_check(run_strlcat(buf, "abc", 7, 6), "PFXabc"); str_check(run_strlcat(buf, "abc", 6, 6), "PFXab"); str_check(run_strlcat(buf, "abc", 5, 6), "PFXa"); str_check(run_strlcat(buf, "abc", 4, 6), "PFX"); str_check(run_strlcat(buf, "abc", 3, 6), "PFX"); str_check(run_strlcat(buf, "abc", 2, 5), "PFX"); str_check(run_strlcat(buf, "abc", 1, 4), "PFX"); str_check(run_strlcat(buf, "abc", 0, 3), "PFX"); end:; } /* * strerror_r() */ static void test_strerror_r(void *p) { char buf[128]; /* "int" vs. "const char *" */ tt_assert(strerror_r(EINTR, buf, sizeof(buf)) != 0); tt_assert(strlen(strerror_r(EINTR, buf, sizeof(buf))) != 0); end:; } /* * memrchr */ static void test_memrchr(void *p) { static const char data[] = "abcdabc"; tt_assert(memrchr(data, 'a', 8) == data + 4); tt_assert(memrchr(data, 'a', 4) == data + 0); tt_assert(memrchr(data, 'd', 8) == data + 3); tt_assert(memrchr(data, 'x', 8) == NULL); end:; } /* * basename */ static const char *run_basename(const char *path) { static char buf[128]; const char *res; if (!path) return basename(NULL); strlcpy(buf, path, sizeof(buf)); res = basename(buf); if (strcmp(buf, path) != 0) return "MODIFIES"; return res; } static void test_basename(void *p) { str_check(run_basename("/usr/lib"), "lib"); str_check(run_basename("/usr/"), "usr"); str_check(run_basename("/"), "/"); str_check(run_basename("///"), "/"); str_check(run_basename("//usr//lib//"), "lib"); str_check(run_basename(""), "."); str_check(run_basename("a/"), "a"); str_check(run_basename(NULL), "."); end:; } /* * dirname */ static const char *run_dirname(const char *path) { static char buf[128]; const char *res; if (!path) return dirname(NULL); 
strlcpy(buf, path, sizeof(buf)); res = dirname(buf); if (strcmp(buf, path) != 0) return "MODIFIES"; return res; } static void test_dirname(void *p) { str_check(run_dirname("/usr/lib"), "/usr"); str_check(run_dirname("/usr/"), "/"); str_check(run_dirname("usr"), "."); str_check(run_dirname("/usr/"), "/"); str_check(run_dirname("/"), "/"); str_check(run_dirname("/"), "/"); str_check(run_dirname(".."), "."); str_check(run_dirname("."), "."); str_check(run_dirname(""), "."); str_check(run_dirname("a/"), "."); str_check(run_dirname("a//"), "."); str_check(run_dirname(NULL), "."); end:; } /* * strlist */ static bool slshow(void *arg, const char *s) { struct MBuf *mb = arg; if (mbuf_written(mb) > 0) { if (!mbuf_write_byte(mb, ',')) return false; } if (!s) s = "NULL"; return mbuf_write(mb, s, strlen(s)); } static const char *lshow(const struct StrList *sl) { static char buf[128]; bool ok; struct MBuf mb; mbuf_init_fixed_writer(&mb, buf, sizeof(buf)); ok = strlist_foreach(sl, slshow, &mb); if (!ok) return "FAIL"; ok = mbuf_write_byte(&mb, 0); if (!ok) return "FAIL"; return buf; } static void test_strlist(void *p) { struct StrList *sl = NULL; const char *s; sl = strlist_new(USUAL_ALLOC); str_check(lshow(sl), ""); strlist_append(sl, "1"); str_check(lshow(sl), "1"); strlist_append(sl, "2"); str_check(lshow(sl), "1,2"); strlist_append(sl, "3"); str_check(lshow(sl), "1,2,3"); s = strlist_pop(sl); str_check(s, "1"); free(s); strlist_append(sl, NULL); str_check(lshow(sl), "2,3,NULL"); strlist_free(sl); end:; } static bool sl_add(void *arg, const char *s) { return strlist_append(arg, s); } static const char *wlist(const char *s) { const char *res = "FAIL"; struct StrList *sl = strlist_new(USUAL_ALLOC); bool ok = parse_word_list(s, sl_add, sl); if (ok) { if (strlist_empty(sl)) res = "-"; else res = lshow(sl); } strlist_free(sl); return res; } static void test_wlist(void *p) { str_check(wlist("1,2,3"), "1,2,3"); str_check(wlist(" 1 , \n 2 \t , \t3"), "1,2,3"); str_check(wlist(" 1 "), 
"1"); str_check(wlist(" 1 ,"), "1"); str_check(wlist(", 1 "), "1"); str_check(wlist("1 2"), "1 2"); str_check(wlist(" "), ""); end:; } /* * Describe */ struct testcase_t string_tests[] = { { "strlcpy", test_strlcpy }, { "strlcat", test_strlcat }, { "strerror_r", test_strerror_r }, { "memrchr", test_memrchr }, { "basename", test_basename }, { "dirname", test_dirname }, { "strlist", test_strlist }, { "parse_wordlist", test_wlist }, END_OF_TESTCASES }; skytools-3.2.6/lib/test/test_crypto.c0000644000000000000000000006743412166266754014604 0ustar #include #include "tinytest.h" #include "tinytest_macros.h" #define str_check(a, b) tt_str_op(a, ==, b) #include #include #include #include #include #include #include #include static const char *mkhex(const uint8_t *src, int len) { static char buf[1024 + 1]; static const char hextbl[] = "0123456789abcdef"; int i; for (i = 0; i < len; i++) { buf[i*2] = hextbl[src[i] >> 4]; buf[i*2+1] = hextbl[src[i] & 15]; } buf[i*2] = 0; return buf; } static int hexval(char v) { if (v >= '0' && v <= '9') return v - '0'; if (v >= 'a' && v <= 'f') return v - 'a' + 10; if (v >= 'A' && v <= 'F') return v - 'A' + 10; return -1; } static uint8_t *fromhex(const char *input, int len) { uint8_t *res; const char *s = input; int i, b, b1, b2; res = malloc(len+1); if (!res) return NULL; for (i = 0; i < len; i++) { if (*s == '\0') s = input; b1 = hexval(*s++); b2 = hexval(*s++); b = (b1 << 4) | b2; if (b < 0) { free(res); return NULL; } res[i] = b; } return res; } static const char *run_hash(const char *str, const char *hexstr, const struct DigestInfo *impl) { struct DigestContext *ctx; uint8_t res[512]; uint8_t res2[512]; int i, len, step; int reslen; uint8_t *buf = NULL; if (hexstr) { len = strlen(hexstr) / 2; buf = fromhex(hexstr, len); if (!buf) return "NOMEM"; str = (char *)buf; } else { len = strlen(str); } ctx = digest_new(impl, USUAL_ALLOC); if (!ctx) return "NOMEM"; reslen = digest_result_len(ctx); digest_update(ctx, str, len); digest_final(ctx, 
res); digest_reset(ctx); step = 3; for (i = 0; i < len; i += step) digest_update(ctx, str+i, (i + step <= len) ? (step) : (len - i)); digest_final(ctx, res2); digest_free(ctx); if (buf) free(buf); if (memcmp(res, res2, reslen) != 0) return "FAIL"; return mkhex(res, reslen); } /* * MD5 */ static const char *run_md5(const char *str) { return run_hash(str, NULL, digest_MD5()); } static void test_md5(void *ptr) { str_check(run_md5(""), "d41d8cd98f00b204e9800998ecf8427e"); str_check(run_md5("a"), "0cc175b9c0f1b6a831c399e269772661"); str_check(run_md5("abc"), "900150983cd24fb0d6963f7d28e17f72"); str_check(run_md5("message digest"), "f96b697d7cb7938d525a2f31aaf161d0"); str_check(run_md5("abcdefghijklmnopqrstuvwxyz"), "c3fcd3d76192e4007dfb496cca67e13b"); str_check(run_md5("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"), "d174ab98d277d9f5a5611c2c9f419d9f"); str_check(run_md5("12345678901234567890123456789012345678901234567890123456789012345678901234567890"), "57edf4a22be3c955ac49da2e2107b67a"); end:; } /* * SHA1 */ static const char *run_sha1(const char *str) { return run_hash(str, NULL, digest_SHA1()); } static void test_sha1(void *ptr) { str_check(run_sha1(""), "da39a3ee5e6b4b0d3255bfef95601890afd80709"); str_check(run_sha1("a"), "86f7e437faa5a7fce15d1ddcb9eaeaea377667b8"); str_check(run_sha1("abc"), "a9993e364706816aba3e25717850c26c9cd0d89d"); str_check(run_sha1("message digest"), "c12252ceda8be8994d5fa0290a47231c1d16aae3"); str_check(run_sha1("abcdefghijklmnopqrstuvwxyz"), "32d10c7b8cf96570ca04ce37f2a19d84240d3a89"); str_check(run_sha1("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"), "761c457bf73b14d27e9e9265c46f4b4dda11f940"); str_check(run_sha1("12345678901234567890123456789012345678901234567890123456789012345678901234567890"), "50abf5706a150990a08b2c5ea40fa0e585554732"); end:; } /* * SHA224 */ static const char *run_sha224(const char *str) { return run_hash(str, NULL, digest_SHA224()); } static void test_sha224(void *ptr) { 
str_check(run_sha224(""), "d14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f"); str_check(run_sha224("a"), "abd37534c7d9a2efb9465de931cd7055ffdb8879563ae98078d6d6d5"); str_check(run_sha224("abc"), "23097d223405d8228642a477bda255b32aadbce4bda0b3f7e36c9da7"); str_check(run_sha224("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"), "75388b16512776cc5dba5da1fd890150b0c6455cb4f58b1952522525"); str_check(run_sha224("12345678901234567890123456789012345678901234567890123456789012345678901234567890"), "b50aecbe4e9bb0b57bc5f3ae760a8e01db24f203fb3cdcd13148046e"); end:; } /* * SHA256 */ static const char *run_sha256(const char *str) { return run_hash(str, NULL, digest_SHA256()); } static void test_sha256(void *ptr) { str_check(run_sha256(""), "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); str_check(run_sha256("a"), "ca978112ca1bbdcafac231b39a23dc4da786eff8147c4e72b9807785afee48bb"); str_check(run_sha256("abc"), "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"); str_check(run_sha256("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"), "248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1"); str_check(run_sha256("12345678901234567890123456789012345678901234567890123456789012345678901234567890"), "f371bc4a311f2b009eef952dd83ca80e2b60026c8e935592d0f9c308453c813e"); end:; } /* * SHA384 */ static const char *run_sha384(const char *str) { return run_hash(str, NULL, digest_SHA384()); } static void test_sha384(void *ptr) { str_check(run_sha384(""), "38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e1da274edebfe76f65fbd51ad2f14898b95b"); str_check(run_sha384("a"), "54a59b9f22b0b80880d8427e548b7c23abd873486e1f035dce9cd697e85175033caa88e6d57bc35efae0b5afd3145f31"); str_check(run_sha384("abc"), "cb00753f45a35e8bb5a03d699ac65007272c32ab0eded1631a8b605a43ff5bed8086072ba1e7cc2358baeca134c825a7"); str_check(run_sha384("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"), 
"3391fdddfc8dc7393707a65b1b4709397cf8b1d162af05abfe8f450de5f36bc6b0455a8520bc4e6f5fe95b1fe3c8452b"); str_check(run_sha384("abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu"), "09330c33f71147e83d192fc782cd1b4753111b173b3b05d22fa08086e3b0f712fcc7c71a557e2db966c3e9fa91746039"); str_check(run_sha384("abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz"), "3d208973ab3508dbbd7e2c2862ba290ad3010e4978c198dc4d8fd014e582823a89e16f9b2a7bbc1ac938e2d199e8bea4"); end:; } /* * SHA512 */ static const char *run_sha512(const char *str) { return run_hash(str, NULL, digest_SHA512()); } static void test_sha512(void *ptr) { str_check(run_sha512(""), "cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e"); str_check(run_sha512("a"), "1f40fc92da241694750979ee6cf582f2d5d7d28e18335de05abc54d0560e0f5302860c652bf08d560252aa5e74210546f369fbbbce8c12cfc7957b2652fe9a75"); str_check(run_sha512("abc"), "ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f"); str_check(run_sha512("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"), "204a8fc6dda82f0a0ced7beb8e08a41657c16ef468b228a8279be331a703c33596fd15c13b1b07f9aa1d3bea57789ca031ad85c7a71dd70354ec631238ca3445"); str_check(run_sha512("abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz"), "930d0cefcb30ff1133b6898121f1cf3d27578afcafe8677c5257cf069911f75d8f5831b56ebfda67b278e66dff8b84fe2b2870f742a580d8edb41987232850c9"); str_check(run_sha512("abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu"), "8e959b75dae313da8cf4f72814fc143f8f7779c6eb9f7fa17299aeadb6889018501d289e4900f7e4331b99dec4b5433ac7d329eeb6dd26545e96e55b874be909"); end:; } /* * Keccak-224 */ static const 
char *run_keccak224(const char *hex) { return run_hash(NULL, hex, digest_KECCAK224()); } static void test_keccak224(void *ptr) { str_check(run_keccak224(""), "f71837502ba8e10837bdd8d365adb85591895602fc552b48b7390abd"); str_check(run_keccak224("CC"), "a9cab59eb40a10b246290f2d6086e32e3689faf1d26b470c899f2802"); str_check(run_keccak224("41FB"), "615ba367afdc35aac397bc7eb5d58d106a734b24986d5d978fefd62c"); str_check(run_keccak224("1F877C"), "6f9d2898efd096baaaaab2e97482ddb6389b8e6caa964b7a0e347e13"); str_check(run_keccak224("C1ECFDFC"), "e405869da1464a705700a3cbce131aabeeba9c8d2fe6576b21bcbe16"); str_check(run_keccak224("21F134AC57"), "5573da2b02216a860389a581f6e9fb8d805e9e02f6fa911701eee298"); str_check(run_keccak224("C6F50BB74E29"), "163c9060163aa66b8b7c0cfaa65d934bff219bcbc267187caba0042f"); str_check(run_keccak224("119713CC83EEEF"), "cfc04c6f8463ddab24cdf8b8652bd11df23dd1b95f118328dd01580e"); str_check(run_keccak224("4A4F202484512526"), "7a5c2cb3f999dd00eff7399963314ca647dd0e5ae1bddec611f8338d"); str_check(run_keccak224("B32D95B0B9AAD2A8816DE6D06D1F86008505BD8C14124F6E9A163B5A2ADE55F835D0EC3880EF50700D3B25E42CC0AF050CCD1BE5E555B23087E04D7BF9813622780C7313A1954F8740B6EE2D3F71F768DD417F520482BD3A08D4F222B4EE9DBD015447B33507DD50F3AB4247C5DE9A8ABD62A8DECEA01E3B87C8B927F5B08BEB37674C6F8E380C04"), "42275c296937745758ff2b7bee9a897191ae87e42bd10198d9466c19"); str_check(run_keccak224("04410E31082A47584B406F051398A6ABE74E4DA59BB6F85E6B49E8A1F7F2CA00DFBA5462C2CD2BFDE8B64FB21D70C083F11318B56A52D03B81CAC5EEC29EB31BD0078B6156786DA3D6D8C33098C5C47BB67AC64DB14165AF65B44544D806DDE5F487D5373C7F9792C299E9686B7E5821E7C8E2458315B996B5677D926DAC57B3F22DA873C601016A0D"), "143f9055eb1f736729c77721fb65ed5ee142f6e969132fb22989c11f"); 
str_check(run_keccak224("3A3A819C48EFDE2AD914FBF00E18AB6BC4F14513AB27D0C178A188B61431E7F5623CB66B23346775D386B50E982C493ADBBFC54B9A3CD383382336A1A0B2150A15358F336D03AE18F666C7573D55C4FD181C29E6CCFDE63EA35F0ADF5885CFC0A3D84A2B2E4DD24496DB789E663170CEF74798AA1BBCD4574EA0BBA40489D764B2F83AADC66B148B4A0CD95246C127D5871C4F11418690A5DDF01246A0C80A43C70088B6183639DCFDA4125BD113A8F49EE23ED306FAAC576C3FB0C1E256671D817FC2534A52F5B439F72E424DE376F4C565CCA82307DD9EF76DA5B7C4EB7E085172E328807C02D011FFBF33785378D79DC266F6A5BE6BB0E4A92ECEEBAEB1"), "5af56987ea9cf11fcd0eac5ebc14b037365e9b1123e31cb2dfc7929a"); end:; } /* * Keccak-256 */ static const char *run_keccak256(const char *hex) { return run_hash(NULL, hex, digest_KECCAK256()); } static void test_keccak256(void *ptr) { str_check(run_keccak256(""), "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"); str_check(run_keccak256("CC"), "eead6dbfc7340a56caedc044696a168870549a6a7f6f56961e84a54bd9970b8a"); str_check(run_keccak256("41FB"), "a8eaceda4d47b3281a795ad9e1ea2122b407baf9aabcb9e18b5717b7873537d2"); str_check(run_keccak256("1F877C"), "627d7bc1491b2ab127282827b8de2d276b13d7d70fb4c5957fdf20655bc7ac30"); str_check(run_keccak256("C1ECFDFC"), "b149e766d7612eaf7d55f74e1a4fdd63709a8115b14f61fcd22aa4abc8b8e122"); str_check(run_keccak256("21F134AC57"), "67f05544dbe97d5d6417c1b1ea9bc0e3a99a541381d1cd9b08a9765687eb5bb4"); str_check(run_keccak256("C6F50BB74E29"), "923062c4e6f057597220d182dbb10e81cd25f60b54005b2a75dd33d6dac518d0"); str_check(run_keccak256("119713CC83EEEF"), "feb8405dcd315d48c6cbf7a3504996de8e25cc22566efec67433712eda99894f"); str_check(run_keccak256("4A4F202484512526"), "e620d8f2982b24fedaaa3baa9b46c3f9ce204ee356666553ecb35e15c3ff9bf9"); 
str_check(run_keccak256("B32D95B0B9AAD2A8816DE6D06D1F86008505BD8C14124F6E9A163B5A2ADE55F835D0EC3880EF50700D3B25E42CC0AF050CCD1BE5E555B23087E04D7BF9813622780C7313A1954F8740B6EE2D3F71F768DD417F520482BD3A08D4F222B4EE9DBD015447B33507DD50F3AB4247C5DE9A8ABD62A8DECEA01E3B87C8B927F5B08BEB37674C6F8E380C04"), "e717a7769448abbe5fef8187954a88ac56ded1d22e63940ab80d029585a21921"); str_check(run_keccak256("04410E31082A47584B406F051398A6ABE74E4DA59BB6F85E6B49E8A1F7F2CA00DFBA5462C2CD2BFDE8B64FB21D70C083F11318B56A52D03B81CAC5EEC29EB31BD0078B6156786DA3D6D8C33098C5C47BB67AC64DB14165AF65B44544D806DDE5F487D5373C7F9792C299E9686B7E5821E7C8E2458315B996B5677D926DAC57B3F22DA873C601016A0D"), "a95d50b50b4545f0947441df74a1e9d74622eb3baa49c1bbfc3a0cce6619c1aa"); str_check(run_keccak256("3A3A819C48EFDE2AD914FBF00E18AB6BC4F14513AB27D0C178A188B61431E7F5623CB66B23346775D386B50E982C493ADBBFC54B9A3CD383382336A1A0B2150A15358F336D03AE18F666C7573D55C4FD181C29E6CCFDE63EA35F0ADF5885CFC0A3D84A2B2E4DD24496DB789E663170CEF74798AA1BBCD4574EA0BBA40489D764B2F83AADC66B148B4A0CD95246C127D5871C4F11418690A5DDF01246A0C80A43C70088B6183639DCFDA4125BD113A8F49EE23ED306FAAC576C3FB0C1E256671D817FC2534A52F5B439F72E424DE376F4C565CCA82307DD9EF76DA5B7C4EB7E085172E328807C02D011FFBF33785378D79DC266F6A5BE6BB0E4A92ECEEBAEB1"), "348fb774adc970a16b1105669442625e6adaa8257a89effdb5a802f161b862ea"); end:; } /* * Keccak-384 */ static const char *run_keccak384(const char *hex) { return run_hash(NULL, hex, digest_KECCAK384()); } static void test_keccak384(void *ptr) { str_check(run_keccak384(""), "2c23146a63a29acf99e73b88f8c24eaa7dc60aa771780ccc006afbfa8fe2479b2dd2b21362337441ac12b515911957ff"); str_check(run_keccak384("CC"), "1b84e62a46e5a201861754af5dc95c4a1a69caf4a796ae405680161e29572641f5fa1e8641d7958336ee7b11c58f73e9"); str_check(run_keccak384("41FB"), "495cce2714cd72c8c53c3363d22c58b55960fe26be0bf3bbc7a3316dd563ad1db8410e75eefea655e39d4670ec0b1792"); str_check(run_keccak384("1F877C"), 
"b0665c345f45e6de145b0190335ef5d5aa59e0b49fc1425d5eae7355ea442284cb8a2152d565ebdf2810eccab15af04f"); str_check(run_keccak384("C1ECFDFC"), "f1850b2abb24f3fd683c701582789d9e92b6a45f9c345f9dae7f7997c8c910e88003e592e59281cf92c92d6b51a1afd1"); str_check(run_keccak384("21F134AC57"), "68d437327f158287c304bbaf36f782f497da2c480a1fbb268682362218641f9070a014919ad7331c49beefccb437fe9a"); str_check(run_keccak384("C6F50BB74E29"), "03566ec003ff55184f0c85beebc6d1ecf5e5d082d8d40137246f8fd42bce097c09418845ef60286fdd894a00fd2d6589"); str_check(run_keccak384("119713CC83EEEF"), "790d700fa34d6a835be311b639474780148a2f087ac2fa86e8a1a433ec7a04fcbfc5284a3e188b7d91c6d094eafbeecb"); str_check(run_keccak384("4A4F202484512526"), "638e65758a297cb09ded1ac5b9e8f779802000ab791f67f33c60be36443793adcc8a4a58e98688157a41784f02a4bcb2"); str_check(run_keccak384("B32D95B0B9AAD2A8816DE6D06D1F86008505BD8C14124F6E9A163B5A2ADE55F835D0EC3880EF50700D3B25E42CC0AF050CCD1BE5E555B23087E04D7BF9813622780C7313A1954F8740B6EE2D3F71F768DD417F520482BD3A08D4F222B4EE9DBD015447B33507DD50F3AB4247C5DE9A8ABD62A8DECEA01E3B87C8B927F5B08BEB37674C6F8E380C04"), "278e83cff1ff6cc4b3ac41f3879da87ae63b535b43815e273687a4cc519855b452cb6af0198bb9fd0f3e43739bc0cdd7"); str_check(run_keccak384("04410E31082A47584B406F051398A6ABE74E4DA59BB6F85E6B49E8A1F7F2CA00DFBA5462C2CD2BFDE8B64FB21D70C083F11318B56A52D03B81CAC5EEC29EB31BD0078B6156786DA3D6D8C33098C5C47BB67AC64DB14165AF65B44544D806DDE5F487D5373C7F9792C299E9686B7E5821E7C8E2458315B996B5677D926DAC57B3F22DA873C601016A0D"), "aa4b5a5fb94fe19578f33323ba1eefc5b6ed70b34bc70193f386c99f73863611af20581b4b1b3ed776df9e235d3d4e45"); 
str_check(run_keccak384("3A3A819C48EFDE2AD914FBF00E18AB6BC4F14513AB27D0C178A188B61431E7F5623CB66B23346775D386B50E982C493ADBBFC54B9A3CD383382336A1A0B2150A15358F336D03AE18F666C7573D55C4FD181C29E6CCFDE63EA35F0ADF5885CFC0A3D84A2B2E4DD24496DB789E663170CEF74798AA1BBCD4574EA0BBA40489D764B2F83AADC66B148B4A0CD95246C127D5871C4F11418690A5DDF01246A0C80A43C70088B6183639DCFDA4125BD113A8F49EE23ED306FAAC576C3FB0C1E256671D817FC2534A52F5B439F72E424DE376F4C565CCA82307DD9EF76DA5B7C4EB7E085172E328807C02D011FFBF33785378D79DC266F6A5BE6BB0E4A92ECEEBAEB1"), "6bff1c8405a3fe594e360e3bccea1ebcd509310dc79b9e45c263783d7a5dd662c6789b18bd567dbdda1554f5bee6a860"); end:; } /* * Keccak-512 */ static const char *run_keccak512(const char *hex) { return run_hash(NULL, hex, digest_KECCAK512()); } static void test_keccak512(void *ptr) { str_check(run_keccak512(""), "0eab42de4c3ceb9235fc91acffe746b29c29a8c366b7c60e4e67c466f36a4304c00fa9caf9d87976ba469bcbe06713b435f091ef2769fb160cdab33d3670680e"); str_check(run_keccak512("CC"), "8630c13cbd066ea74bbe7fe468fec1dee10edc1254fb4c1b7c5fd69b646e44160b8ce01d05a0908ca790dfb080f4b513bc3b6225ece7a810371441a5ac666eb9"); str_check(run_keccak512("41FB"), "551da6236f8b96fce9f97f1190e901324f0b45e06dbbb5cdb8355d6ed1dc34b3f0eae7dcb68622ff232fa3cece0d4616cdeb3931f93803662a28df1cd535b731"); str_check(run_keccak512("1F877C"), "eb7f2a98e00af37d964f7d8c44c1fb6e114d8ee21a7b976ae736539efdc1e3fe43becef5015171e6da30168cae99a82c53fa99042774ef982c01626a540f08c0"); str_check(run_keccak512("C1ECFDFC"), "952d4c0a6f0ef5ce438c52e3edd345ea00f91cf5da8097c1168a16069e958fc05bad90a0c5fb4dd9ec28e84b226b94a847d6bb89235692ef4c9712f0c7030fae"); str_check(run_keccak512("21F134AC57"), "2e76d93affd62b92fc4f29cb83efbe4ba21d88426aa7f075bfc20960ea258787898172e17045af43ab1fe445532be0185fbea84d9be788b05f14dbf4856a5254"); str_check(run_keccak512("C6F50BB74E29"), "40fa8074e1e509b206448fbe757d9494b9b51e8d6e674a67f53c11ef92e96c3ea08b95ebd4172b020010cd6cf29539a34d6bfa002a2042787aa8d879a0f5b54c"); 
str_check(run_keccak512("119713CC83EEEF"), "d1116786a3c1ea46a8f22d82abb4c5d06dc0691b2e747ac9726d0b290e6959f7b23428519a656b237695e56403855ec4c98db0cf87f31b6ceabf2b9b8589b713"); str_check(run_keccak512("4A4F202484512526"), "f326c7c126ddc277922760feef77c9bab6fb5d3430f652593703d7c5e30135cd0b0575257509a624184330d6ab1f508a666391b5d4690426b4e05301891df897"); str_check(run_keccak512("B32D95B0B9AAD2A8816DE6D06D1F86008505BD8C14124F6E9A163B5A2ADE55F835D0EC3880EF50700D3B25E42CC0AF050CCD1BE5E555B23087E04D7BF9813622780C7313A1954F8740B6EE2D3F71F768DD417F520482BD3A08D4F222B4EE9DBD015447B33507DD50F3AB4247C5DE9A8ABD62A8DECEA01E3B87C8B927F5B08BEB37674C6F8E380C04"), "a6054ffc3d81591be964c4b004a3a21142365b59ee98b2873d488293f93a8d7154bf72100012c60d3c9418f6af8ea66372cb4703f5f6381de6d4b9b98cff1e90"); str_check(run_keccak512("04410E31082A47584B406F051398A6ABE74E4DA59BB6F85E6B49E8A1F7F2CA00DFBA5462C2CD2BFDE8B64FB21D70C083F11318B56A52D03B81CAC5EEC29EB31BD0078B6156786DA3D6D8C33098C5C47BB67AC64DB14165AF65B44544D806DDE5F487D5373C7F9792C299E9686B7E5821E7C8E2458315B996B5677D926DAC57B3F22DA873C601016A0D"), "b0e54a12fdba0738898f1bbf0ba81f81de77648d8d14c20bdd5d90f300d382e069f5dba7eec6b23168b008b9f39c2b93fd742a5902a5e02728f57712d6a61d4e"); str_check(run_keccak512("3A3A819C48EFDE2AD914FBF00E18AB6BC4F14513AB27D0C178A188B61431E7F5623CB66B23346775D386B50E982C493ADBBFC54B9A3CD383382336A1A0B2150A15358F336D03AE18F666C7573D55C4FD181C29E6CCFDE63EA35F0ADF5885CFC0A3D84A2B2E4DD24496DB789E663170CEF74798AA1BBCD4574EA0BBA40489D764B2F83AADC66B148B4A0CD95246C127D5871C4F11418690A5DDF01246A0C80A43C70088B6183639DCFDA4125BD113A8F49EE23ED306FAAC576C3FB0C1E256671D817FC2534A52F5B439F72E424DE376F4C565CCA82307DD9EF76DA5B7C4EB7E085172E328807C02D011FFBF33785378D79DC266F6A5BE6BB0E4A92ECEEBAEB1"), "81950e7096d31d4f22e3db71cac725bf59e81af54c7ca9e6aeee71c010fc5467466312a01aa5c137cfb140646941556796f612c9351268737c7e9a2b9631d1fa"); end:; } /* * Keccak-Stream */ static const char *run_keccakS(const char *hex) { struct DigestContext 
*ctx; uint8_t res[512]; int len; unsigned reslen; uint8_t *buf = NULL; len = strlen(hex) / 2; buf = fromhex(hex, len); ctx = digest_new(digest_KECCAK_STREAM(), USUAL_ALLOC); if (!ctx) return "NOMEM"; digest_update(ctx, buf, len); free(buf); reslen = 0; while (reslen < sizeof(res)) { digest_final(ctx, res + reslen); reslen += digest_result_len(ctx); } return mkhex(res, reslen); } static void test_keccak_stream(void *ptr) { str_check(run_keccakS(""), "6753e3380c09e385d0339eb6b050a68f66cfd60a73476e6fd6adeb72f5edd7c6f04a5d017a19cbe291935855b4860f69df04c98aa78b407a9ba9826f7266ef14ba6d3f90c4fe154d27c2858ea6db8c117411a1bc5c499410c391b298f37bf636b0f5c31dbd6487a7d3d8cf2a97b619697e66d894299b8b4d80e0498538e18544c3a2fa33f0bfb1cfef8da7875c4967f332c7fc93c050e81fb404f9a91503d6010ee16f50b4ed0bc563ba8431668b003d7e2e6f226cb7fa93bb2e132c861fdc2141457589a63ecf05481126a7c2de941a2fdec71cb70de81887b9014223865e79c4ffe82dae83c1fc484b9a07a7e52b135f4ae3a0e09247ea4e2625e9349b0ac73f24cb418df6dcb49ca37860298ada18aa23595b5096ef789de3edf3826817fff4f71102a01e1d2599f2958d5c186f5b11f5feedb61bb732dbb42d18b1e77258a8f211bf95c9f47f19603ec419ff879aea41a4811344d016bbc4f9496741c469cca425c5be73543219af40796c0b9ff14aeaa70c5e22e4bb1346a3ddfedd8a559104e4704f1227d42918ae3f7404fbf3c6340a486e776aabcc34190f87da4bd954b83386255a0e34df05ca2e781faf6fe66475852481fce20798a56629abfac408760ce64606008a3b568c88aba1c6df3381e0765567ea84b2ce4b441cf1eefaa32125d5139361a632b3008566a2e8af1055cb06ae462b6bf87b34a9770618e6"); str_check(run_keccakS("CC"), 
"56b97029b479ff5dd15f17d12983e3b835bb0531d9b8d49b103b025ca53f991741298e961d1fad00fc365c7761bfb278ae473980d612c1629e075a3fdbae7f82b0f0af54df187f358852e19ea4347cf5ceea676a1dce3a47447e237fd74204f9a4b7f7c9cc7cc8b865b1d554e2f5f4a8ee17dbdde7267894558a20972c9eb6cf5f62ce9151437718ed4aff08fa76d803806e6ce47d229aae839369e31888b26429e27bc3756021cb51498bcf2527d4bb04838bc1ceed9985a2a66ff8cb8c2d58b7099304e7f9622c583b093024a5fcde2be781474c159df24d77d328c298f5766a8a0dbf7ae790a509ccf59e0cacd0abf21492e0095a87ecdb55990093917aaa96d7f68b7b859b8094aec0ddb6fb352a6cc1f007fa988ed764f5d6f21f9d8ade9ce7aca4de6570da39d9acceb46d2582fa4c4231de0b736fb341041d24cfae6c0761f43a2cf7383f38742579218afcab53d2e6816640de05644d877558e965b1a28406999f31ccc43ac0b02bc5448b66ad3b6f8de04c0e25845c8671b6f0594909a057f17fd06031707c8b4599889c994a35c193dbf84a7a0919cd054f67ceb7965f420d02da3477efc8b55413c241adcf71cb10fe7e3e720b8c1736837b06e4b27461b71c6cac892437530bbfe05cf426272f80f11709b9db964f5dedab9e757c2f7a972b6a4c2443b03ad787ab1e243660bced739157a434800696841acea4"); str_check(run_keccakS("41FB"), 
"cbe96338dd8f04c0694299637aab223b6d60560c6bed7f69923aebb24fc61b84702403d39e7d081f7f7b714e3ba6e6221fe840f57a1e9bd775b90d59c9853695c2b11cd06f1054210d7d8155b908ff4ee14fdf859b6d5aa6bf76903be0af4a2ffd52b2b149da32c8e372f51826d4ca7dcd6516d167a0621aa88986d19a524dd352b9ca08f341d2267671f45e05892e1a5c604bb721bc8952dac20d559dc183656501cc34bc91e2aea930716b20539131ac5f9ae0a630e3691abe6e76935d21f99e3f2e531526360405004bd730388236a1197fe3715315e8ca40b4e5e6a07cab434264515c26451a7c1387d776bc225b851e9f7807c24a23f42fb47eb29697f6cd80cdbfb79a39675092ab582c5a6bb3284cd72a889601dc2745153fac80ff81c6648cb99facfe51862edc8b03c2c3ba5b83eb1d40d3937caf3d8e511485051d3e5431a19c1571b52e796cf032162292ecf2b490cd97c3e2fc2ca339021533cd1aa1c5e8b3f803767ae7585999a2b7d70c7b34324b36399a87c3c73866741cbef9355c1570309544697df9a82da28b6c5ce35556c5bef4e0a24e62f95e543cd3fad6d2ddeea3950e72867d67a8dcd4b338fd8341583fe0e04fff2d6ecdfcd4b41eb8434ee0e31f812b220494202fab9fcba09a9dd26b36637df5607c6d7cbede04868a2d512d11a16c24c4b8d566ce63932b85e7e1a8648f58857629bdc2ee92"); str_check(run_keccakS("C1ECFDFC"), 
"968a76720a2fff1ac9629cac7752c0958b6e19bc79fa31f210244d486645798b55f52581855da53a14139dd78e15f54c66bde1bcc5674f46de6164a86933b2cc99682f7118af25b9034cddac018e6d02f3890fa581c79cad5c6c2380a890ea470876e7bede8e8b78aa6f0cb271f54252dd018c7c9d393a06d60a78be8a5014b89eaa282dfec4e737e43a61cfaade58f9bdde9c6125daf34350b2b4e320f35b62dd0675c0515b943630e3f63880423864dbd70814cc9373c521e8f29bb4138388c92c4b6437a65469902e706ccce3777991a47c0ee9701217fb44cb02e674c7539e473d20352a7a875d6cf3a038e655d3d1a75852fc1859835cc181ef0c58b888a6673bb8275cfb9797f5e146a962d8deb535fc7b166af4fc95209a2dd4c515a0c04ddadfb77ba6b8da76c9ae9cce13538608603ed3550c685bacdc0a059f92dad364f8a72dfaf52faf43011c33b6a6235d9ac6611f346d955e1701f37713bb6f98a05337b1943556d497a4a686645fc359375408702ec45617c949d1209824e4627741cf2760e6ae84e0e1d395da4da5748d042bff19351ff20092df201aeab4d6c603693e6ab01dabaf009a1c8a93e713dacd4a0d93695a2f6ef4e59ab9a140a100766b25b86cbb3632aa73fb16f47b9839e4d0e8736a0b81cdd923f0be79c4c7ab1f7e1be0ed5f079f6e04b979a567e81bc0c1a236e3daa2754f195ff76261"); str_check(run_keccakS("2B6DB7CED8665EBE9DEB080295218426BDAA7C6DA9ADD2088932CDFFBAA1C14129BCCDD70F369EFB149285858D2B1D155D14DE2FDB680A8B027284055182A0CAE275234CC9C92863C1B4AB66F304CF0621CD54565F5BFF461D3B461BD40DF28198E3732501B4860EADD503D26D6E69338F4E0456E9E9BAF3D827AE685FB1D817"), 
"dcac84568f15cac076854ea692de95e473768a99df9ac2328ee423d02eeb8ee8e1d1706213c4415dc7aafa66476d8ebdddd8bf39e1de05ca76c36e7e975629331f3a33c3ca4091c82004e5891b7e276d4642ea61bde021871c9b5c8cfa82144b7a4144b44ebe6093e95c59305fd36a8741c4f2df65cb0b59f803cfdcf2ce4b8b54857a7f8ad8477a22cffc07f4a5ef76996837c9b3f8ff25375e669e68faebed8ac79673860f45fd6e7ee7ed630cb8582785eed432af1f4ca22f4fd4488dc3882de5268ee4c47be4eaa2e77692880d218a69578fb090ed8426c29217b72070be2a4bf0d7bcb480ff262d2dc3bfa9c9d88ff6f5284d240e988e4f9b956aaa36e804bb38ce1f5aa65e73624940c28cf816f4c4f00751bcc6cdc79131e96294d95d6bc98f58a2a687ea7b0769a6dd4f4988b2381631ec1967ef4fcc9efb7a7519783a33787850a8c752f36a4b3abf7e460d5689009a232bef1c33fb3d99069b0157764e4477e2ad68b3a99a05bd2d38288ddd416ee784bdc99e157dfdec61ecb0c49763f187e947c54a4ecbf5eeb76af5feeb222b0844cc1bb9f69cf3b291671bbe98c89ef5d656dfc77375c39cbc2a7ff6413b3ca99834d1845499a09bc111c8582f567d187147c5bbade2194871126dda67daf170079a618a77b8f06193e06f87d441687d1de6e5cda9c791728f837c945f2eb20327802b37fc6d9c2b125f4067"); str_check(run_keccakS("3A3A819C48EFDE2AD914FBF00E18AB6BC4F14513AB27D0C178A188B61431E7F5623CB66B23346775D386B50E982C493ADBBFC54B9A3CD383382336A1A0B2150A15358F336D03AE18F666C7573D55C4FD181C29E6CCFDE63EA35F0ADF5885CFC0A3D84A2B2E4DD24496DB789E663170CEF74798AA1BBCD4574EA0BBA40489D764B2F83AADC66B148B4A0CD95246C127D5871C4F11418690A5DDF01246A0C80A43C70088B6183639DCFDA4125BD113A8F49EE23ED306FAAC576C3FB0C1E256671D817FC2534A52F5B439F72E424DE376F4C565CCA82307DD9EF76DA5B7C4EB7E085172E328807C02D011FFBF33785378D79DC266F6A5BE6BB0E4A92ECEEBAEB1"), 
"9435fc671dfcfcdac149277e2caaa80ed3d4a2359300db892b8093dffa9442bb5c08f242f2fc2cb5f8388032299f1df47a57489a4fc0d66d88e483092320a471897fb6ade67897e5138c45f19174a4b1ae0e510fa390825d17568989c3659fc57b9345d7d93ee588cb2629c5770808195257bbf42b069576d94011989dc6ebc43cfc7cd27b6f9853904f3eb3842bbb37d2bd807f05468f5057f78373b6f34462095a1205c1fca0d15fbcf890ee78ab6f94cb778b5d6f3620e6e6d6ee688eecc619e22e25e0bb5e143a53472e4f1d1f91a8e625087b0f608770c4b9909749ab50ddcdac59bb3c975aba4dceb2b3a2c436ed103ed6d9c62cd63a69a0bdd2baabfbfd63eef34507637f5e8a16a4fcb33d66141781e10bc6262833ec6e2953cedd5f652b76fa042ec0d34ba20f5657e28c08b6b61dfa8da78cf997127e17a35d75ab35542fe6bb9ce5bd06119da6b497ac1ae12947b0c214de28ed5dda7815fb6d5def81025934c877cb91e923191581b508bbabdfe4bb2dd5af6af414bfa28830e4380355bdf2483cabd01b046956b85d5a34f46849ba1cc869f5babd1b41ec775fcb4b5fbad79661daf47dbe7bc6380bc5034bfe626526f3305abe270bbbf29280e58b71db269cf7962d9dc1731bd10d5633b1b10e76791c0fcfddf1c8263f17f8b68b1a0589fe5c9403d272fa133442980588bc1f385c3af240d8f195ab1a3400"); end:; } /* * HMAC */ static const char *run_hmac(const char *key, const char *str, const struct DigestInfo *impl) { struct HMAC *ctx; uint8_t res[512]; int len = strlen(str); int reslen; ctx = hmac_new(impl, key, strlen(key), USUAL_ALLOC); if (!ctx) return "NOMEM"; reslen = hmac_result_len(ctx); hmac_update(ctx, str, len); hmac_final(ctx, res); return mkhex(res, reslen); } static const char *run_hmac_sha1(const char *key, const char *str) { return run_hmac(key, str, digest_SHA1()); } static void test_hmac(void *ptr) { const char *long_key = ( "quite a very long key, longer than a sha1 block size, " "so it needs to be sha-1d before being used as a key"); const char *text = "The quick brown fox jumps over the lazy dog"; str_check(run_hmac_sha1("", ""), "fbdb1d1b18aa6c08324b7d64b71fb76370690e1d"); str_check(run_hmac_sha1("shrt", ""), "41fee95de96c437cf6c2f38363eb38eb0067ff64"); str_check(run_hmac_sha1(long_key, ""), 
"496ca9bda3e523814ba7f99f68a2035e4de7702a"); str_check(run_hmac_sha1(long_key, text), "924e1ee84da31f5f569a27dd6201533b42c999c6"); str_check(run_hmac_sha1("key", text), "de7c9b85b8b78aa6bc8a7a36f70a90701c9db4d9"); end:; } /* * Launcher. */ struct testcase_t crypto_tests[] = { { "md5", test_md5 }, { "sha1", test_sha1 }, { "sha224", test_sha224 }, { "sha256", test_sha256 }, { "sha384", test_sha384 }, { "sha512", test_sha512 }, { "keccak224", test_keccak224 }, { "keccak256", test_keccak256 }, { "keccak384", test_keccak384 }, { "keccak512", test_keccak512 }, { "keccakStream", test_keccak_stream }, { "hmac", test_hmac }, END_OF_TESTCASES }; skytools-3.2.6/lib/test/test_socket.c0000644000000000000000000000313212166266754014535 0ustar #include #include #include "test_common.h" static const char *ntop(int af, const void *src) { static char buf[128]; const char *res; res = inet_ntop(af, src, buf, sizeof(buf)); return res ? res : "NULL"; } static void test_ntop(void *z) { static const uint8_t data[] = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}; str_check(ntop(AF_INET, data), "1.2.3.4"); str_check(ntop(AF_INET6, data), "102:304:506:708:90a:b0c:d0e:f10"); end:; } static const char *pton(int af, const char *s) { static char str[128]; unsigned char buf[128]; int res; int len = (af == AF_INET) ? 4 : 16; memset(buf, 0xCC, sizeof(buf)); res = inet_pton(af, s, buf); if (res < 0) return "EAFBAD"; if (res == 0) return "FAIL"; if (buf[len] != 0xCC || buf[len + 1] != 0xCC) return "EOVER"; if (buf[len - 1] == 0xCC || buf[0] == 0xCC) return "EUNDER"; s = inet_ntop(af, buf, str, sizeof(str)); return s ? 
s : "NULL"; } static void test_pton(void *z) { str_check(pton(AF_INET, "127.0.0.255"), "127.0.0.255"); str_check(pton(AF_INET, "127.0.0"), "FAIL"); str_check(pton(AF_INET, "127.1.1.a"), "FAIL"); str_check(pton(AF_INET, "127.1.1.300"), "FAIL"); str_check(pton(AF_INET6, "0001:0002:ffff:4444:5555:6666:7777:8888"), "1:2:ffff:4444:5555:6666:7777:8888"); str_check(pton(AF_INET6, "::"), "::"); str_check(pton(AF_INET6, "F00F::5060"), "f00f::5060"); str_check(pton(AF_INET6, "F00F::127.0.0.1"), "f00f::7f00:1"); str_check(pton(AF_INET6, "::1:2:3:4:5:6:7:8"), "FAIL"); end:; } struct testcase_t socket_tests[] = { { "inet_ntop", test_ntop }, { "inet_pton", test_pton }, END_OF_TESTCASES }; skytools-3.2.6/lib/test/test_common.c0000644000000000000000000000170012166266754014534 0ustar #include "test_common.h" struct testgroup_t groups[] = { { "base/", base_tests }, { "aatree/", aatree_tests }, { "bits/", bits_tests }, { "cxalloc/", cxalloc_tests }, { "cbtree/", cbtree_tests }, { "crypto/", crypto_tests }, { "hashing/", hashing_tests }, { "endian/", endian_tests }, { "string/", string_tests }, { "wchar/", wchar_tests }, { "fnmatch/", fnmatch_tests }, { "ctype/", ctype_tests }, { "heap/", heap_tests }, { "hashtab/", hashtab_tests }, { "list/", list_tests }, { "shlist/", shlist_tests }, { "utf8/", utf8_tests }, { "strpool/", strpool_tests }, { "pgutil/", pgutil_tests }, { "regex/", regex_tests }, { "socket/", socket_tests }, { "netdb/", netdb_tests }, { "cfparser/", cfparser_tests }, { "getopt/", getopt_tests }, { "mdict/", mdict_tests }, { "time/", time_tests }, { "fileutil/", fileutil_tests }, END_OF_GROUPS }; int main(int argc, const char *argv[]) { return tinytest_main(argc, argv, groups); } skytools-3.2.6/lib/test/test_common.h0000644000000000000000000000247412166266754014552 0ustar #include #include "tinytest.h" #include "tinytest_macros.h" #define str_check(a, b) tt_str_op(a, ==, b) #define int_check(a, b) tt_int_op(a, ==, b) #define ull_check(a, b) tt_assert_op_type(a, ==, b, 
uint64_t, "%" PRIu64) extern struct testcase_t aatree_tests[]; extern struct testcase_t cbtree_tests[]; extern struct testcase_t string_tests[]; extern struct testcase_t crypto_tests[]; extern struct testcase_t heap_tests[]; extern struct testcase_t list_tests[]; extern struct testcase_t utf8_tests[]; extern struct testcase_t strpool_tests[]; extern struct testcase_t pgutil_tests[]; extern struct testcase_t regex_tests[]; extern struct testcase_t cxalloc_tests[]; extern struct testcase_t bits_tests[]; extern struct testcase_t base_tests[]; extern struct testcase_t netdb_tests[]; extern struct testcase_t cfparser_tests[]; extern struct testcase_t endian_tests[]; extern struct testcase_t hashtab_tests[]; extern struct testcase_t mdict_tests[]; extern struct testcase_t shlist_tests[]; extern struct testcase_t time_tests[]; extern struct testcase_t hashing_tests[]; extern struct testcase_t fileutil_tests[]; extern struct testcase_t socket_tests[]; extern struct testcase_t getopt_tests[]; extern struct testcase_t ctype_tests[]; extern struct testcase_t fnmatch_tests[]; extern struct testcase_t wchar_tests[]; skytools-3.2.6/lib/test/test_aatree.c0000644000000000000000000001050712166266754014512 0ustar #include #include #include "test_common.h" static char *OK = "OK"; typedef struct MyNode MyNode; struct MyNode { struct AANode node; int value; }; static int my_node_pair_cmp(const struct AANode *n1, const struct AANode *n2) { const struct MyNode *m1 = container_of(n1, struct MyNode, node); const struct MyNode *m2 = container_of(n2, struct MyNode, node); return m1->value - m2->value; } static int my_node_cmp(uintptr_t value, struct AANode *node) { MyNode *my = container_of(node, MyNode, node); return value - my->value; } static MyNode *make_node(int value) { MyNode *node = malloc(sizeof(*node)); memset(node, 0, sizeof(*node)); node->value = value; return node; } static void my_node_free(struct AANode *node, void *arg) { MyNode *my = container_of(node, MyNode, node); 
free(my); } /* * Test tree sanity */ static const char *mkerr(const char *msg, int v, const struct AANode *node) { static char buf[128]; snprintf(buf, sizeof(buf), "%s: %d", msg, v); return buf; } static const char *check_sub(const struct AATree *tree, const struct AANode *node, int i) { int cmp_left = 0, cmp_right = 0; const char *res; if (aatree_is_nil_node(node)) return OK; if (node->level != node->left->level + 1) return mkerr("bad left level", i, node); if (!((node->level == node->right->level + 1) || (node->level == node->right->level && node->right->level != node->right->level + 1))) return mkerr("bad right level", i, node); if (!aatree_is_nil_node(node->left)) cmp_left = my_node_pair_cmp(node, node->left); if (!aatree_is_nil_node(node->right)) cmp_right = my_node_pair_cmp(node, node->right); if (cmp_left < 0) return mkerr("wrong left order", i, node); if (cmp_right > 0) return mkerr("wrong right order", i, node); res = check_sub(tree, node->left, i); if (!res) res = check_sub(tree, node->right, i); return res; } static const char *check(struct AATree *tree, int i) { return check_sub(tree, tree->root, i); } /* * checking operations */ static const char * my_search(struct AATree *tree, int value) { struct AANode *res; res = aatree_search(tree, value); return res ? OK : "not found"; } static const char *my_insert(struct AATree *tree, int value) { MyNode *my = make_node(value); aatree_insert(tree, value, &my->node); return check(tree, value); } static const char *my_remove(struct AATree *tree, int value) { const char *res; res = my_search(tree, value); if (res != OK) return res; aatree_remove(tree, value); res = check(tree, value); if (res != OK) return res; if (aatree_search(tree, value) != NULL) return "still found"; return OK; } /* * Simple opeartions. 
*/ static void test_aatree_basic(void *p) { struct AATree tree[1]; int i; aatree_init(tree, my_node_cmp, my_node_free); str_check(my_search(tree, 1), "not found"); for (i = 0; i < 15; i++) { str_check(my_insert(tree, i), "OK"); } for (i = -1; i > -15; i--) { str_check(my_insert(tree, i), "OK"); } for (i = 30; i < 45; i++) { str_check(my_insert(tree, i), "OK"); } for (i = 15; i < 30; i++) { str_check(my_insert(tree, i), "OK"); } for (i = -14; i < 45; i++) { str_check(my_remove(tree, i), "OK"); } end: aatree_destroy(tree); } /* * randomized test */ #define RSIZE 3000 static int get_next(bool with_stat, bool added[]) { int r = random() % RSIZE; int i = r; while (1) { if (added[i] == with_stat) return i; if (++i >= RSIZE) i = 0; if (i == r) return -1; } } static void test_aatree_random(void *p) { bool is_added[RSIZE]; int prefer_remove = 0; /* 0 - insert, 1 - delete */ int n; int op; /* 0 - insert, 1 - delete */ struct AATree tree[1]; unsigned long long total = 0; srandom(123123); memset(is_added, 0, sizeof(is_added)); aatree_init(tree, my_node_cmp, my_node_free); while (total < 20000) { int r = random() & 15; if (prefer_remove) op = r > 5; else op = r > 10; /* op = 0; */ n = get_next(op, is_added); if (n < 0) { if (prefer_remove == op) { prefer_remove = !prefer_remove; } continue; } if (op == 0) { str_check(my_insert(tree, n), "OK"); is_added[n] = 1; } else { str_check(my_remove(tree, n), "OK"); is_added[n] = 0; } total++; } end: aatree_destroy(tree); } struct testcase_t aatree_tests[] = { { "basic", test_aatree_basic }, { "random", test_aatree_random }, END_OF_TESTCASES }; skytools-3.2.6/lib/test/test_netdb.c0000644000000000000000000000161612166266754014346 0ustar #include #include #include #include #include "test_common.h" static int gotres; static void cb_func(union sigval v) { gotres++; } static void test_gai(void *p) { int res; struct sigevent sev; struct gaicb req; struct gaicb *rlist[] = { &req }; memset(&req, 0, sizeof(req)); req.ar_name = "localhost"; 
memset(&sev, 0, sizeof(sev)); sev.sigev_notify = SIGEV_THREAD; sev.sigev_notify_function = cb_func; res = getaddrinfo_a(GAI_NOWAIT, rlist, 1, &sev); if (res == EAI_SYSTEM && errno == ENOSYS) { /* ok - no impl */ goto end; } else { int_check(res, 0); } while (gai_error(&req) == EAI_INPROGRESS || gotres == 0) usleep(10000); int_check(gai_error(&req), 0); freeaddrinfo(req.ar_result); int_check(gotres, 1); end:; } struct testcase_t netdb_tests[] = { { "getaddrinfo_a", test_gai }, END_OF_TESTCASES }; skytools-3.2.6/lib/test/Makefile0000644000000000000000000000242512166266754013506 0ustar AM_FEATURES = libusual SUBLOC = test DIST_SUBDIRS = attregex USUAL_DIR = $(abs_top_srcdir) regtest_system_SOURCES = \ test_string.c test_crypto.c test_aatree.c test_heap.c \ test_common.c test_list.c tinytest.c test_cbtree.c \ test_utf8.c test_strpool.c test_pgutil.c test_regex.c \ test_cxalloc.c test_bits.c test_base.c test_netdb.c \ test_cfparser.c test_endian.c test_hashtab.c test_mdict.c \ test_shlist.c test_time.c test_hashing.c test_fileutil.c \ test_socket.c test_getopt.c test_ctype.c test_fnmatch.c \ test_wchar.c \ test_common.h tinytest.h tinytest_macros.h # build regtest_system against actual library regtest_system_LDADD = ../libusual.a regtest_system_LDFLAGS = regtest_system_CPPFLAGS = -I.. -I. # build regtest_compat as embedded project regtest_compat_EMBED_LIBUSUAL = 1 regtest_compat_CPPFLAGS := -I.. -I. 
-DUSUAL_TEST_CONFIG regtest_compat_LDFLAGS = regtest_compat_SOURCES := $(regtest_system_SOURCES) nodist_regtest_compat_SOURCES = test_config.h EXTRA_DIST = Makefile tinytest_demo.c force_compat.sed test_cfparser.ini noinst_PROGRAMS = regtest_system EXTRA_PROGRAMS = regtest_compat include ../build.mk test_config.h: force_compat.sed ../usual/config.h $(E) " GEN-COMPAT" $@ $(Q) sed -f $^ > $@ clean: clean-local clean-local: $(Q) $(RM) -r fmod_test skytools-3.2.6/lib/test/test_utf8.c0000644000000000000000000000707312166266754014143 0ustar #include #include "test_common.h" #include #include static int uget1(int a) { char buf[2] = { a, 0 }; const char *p = buf; return utf8_get_char(&p, buf + 1); } static int uget2(int a, int b) { char buf[3] = { a, b, 0 }; const char *p = buf; return utf8_get_char(&p, buf + 2); } static int uget3(int a, int b, int c) { char buf[4] = { a, b, c, 0 }; const char *p = buf; return utf8_get_char(&p, buf + 3); } static int uget4(int a, int b, int c, int d) { char buf[5] = { a, b, c, d, 0 }; const char *p = buf; return utf8_get_char(&p, buf + 4); } static void test_utf8_char_size(void *p) { int_check(utf8_char_size(0), 1); int_check(utf8_char_size('a'), 1); int_check(utf8_char_size(0x7F), 1); int_check(utf8_char_size(0x80), 2); int_check(utf8_char_size(0x7FF), 2); int_check(utf8_char_size(0x800), 3); int_check(utf8_char_size(0xFFFF), 3); int_check(utf8_char_size(0x10000), 4); int_check(utf8_char_size(0x100000), 4); int_check(utf8_char_size(0x10FFFF), 4); end:; } static void test_utf8_seq_size(void *p) { int_check(utf8_seq_size(0), 1); int_check(utf8_seq_size(0x7F), 1); int_check(utf8_seq_size(0x80), 0); int_check(utf8_seq_size(0xBF), 0); int_check(utf8_seq_size(0xC0), 0); int_check(utf8_seq_size(0xC1), 0); int_check(utf8_seq_size(0xC2), 2); int_check(utf8_seq_size(0xDF), 2); int_check(utf8_seq_size(0xE0), 3); int_check(utf8_seq_size(0xEF), 3); int_check(utf8_seq_size(0xF0), 4); int_check(utf8_seq_size(0xF4), 4); int_check(utf8_seq_size(0xF5), 
0); int_check(utf8_seq_size(0xFF), 0); end:; } static void test_utf8_get_char(void *p) { int_check(uget1(0), 0); int_check(uget1(0x7F), 0x7F); int_check(uget2(0xC2, 0xA2), 0xA2); int_check(uget2(0xC2, 0xA2), 0xA2); int_check(uget3(0xE2, 0x82, 0xAC), 0x20AC); int_check(uget4(0xF0, 0xA4, 0xAD, 0xA2), 0x024B62); /* invalid reads */ int_check(uget1(0x80), -0x80); int_check(uget1(0xC1), -0xC1); int_check(uget3(0xE0, 0x82, 0xAC), -0xE0); /* short reads */ int_check(uget1(0xC2), -0xC2); int_check(uget2(0xE2, 0x82), -0xE2); int_check(uget3(0xF0, 0xA4, 0xAD), -0xF0); end:; } static const char *uput(unsigned c, int buflen) { static char res[64]; unsigned char buf[8]; char *dst = (char *)buf; char *dstend = (char *)buf + buflen; unsigned len, i; bool ok; memset(buf, 11, sizeof(buf)); ok = utf8_put_char(c, &dst, dstend); if (!ok) return "FAILED"; len = dst - (char *)buf; for (i = len; i < 8; i++) { if (buf[i] != 11) return "OVER"; } snprintf(res, sizeof(res), "%02X %02X %02X %02X %02X %02X", buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]); if (len) res[len*3 - 1] = 0; else res[0] = 0; return res; } static void test_utf8_put_char(void *p) { str_check(uput(0, 1), "00"); str_check(uput(0x7F, 1), "7F"); str_check(uput(0xA2, 2), "C2 A2"); str_check(uput(0x20AC, 3), "E2 82 AC"); str_check(uput(0x024B62, 4), "F0 A4 AD A2"); str_check(uput(0x80FFFFFF, 5), ""); str_check(uput(0xD801, 5), ""); str_check(uput(0xFEFF, 5), "EF BB BF"); str_check(uput(0xFFFE, 5), "EF BF BE"); str_check(uput(0, 0), "FAILED"); str_check(uput(0xA2, 1), "FAILED"); str_check(uput(0x20AC, 2), "FAILED"); str_check(uput(0x20AC, 1), "FAILED"); str_check(uput(0x024B62, 3), "FAILED"); str_check(uput(0x024B62, 2), "FAILED"); str_check(uput(0x024B62, 1), "FAILED"); str_check(uput(0x024B62, 0), "FAILED"); end:; } /* * Describe */ struct testcase_t utf8_tests[] = { { "utf8_char_size", test_utf8_char_size }, { "utf8_seq_size", test_utf8_seq_size }, { "utf8_get_char", test_utf8_get_char }, { "utf8_put_char", 
test_utf8_put_char }, END_OF_TESTCASES }; skytools-3.2.6/lib/test/test_pgutil.c0000644000000000000000000001125512166266754014556 0ustar #include #include "test_common.h" /* * pg_quote_literal */ static char *run_quote_lit(char *dst, const char *src, int size) { if (pg_quote_literal(dst, src, size)) return dst; return "FAIL"; } static void test_quote_lit(void *ptr) { char buf[128]; str_check(run_quote_lit(buf, "", 16), "''"); str_check(run_quote_lit(buf, "a", 16), "'a'"); str_check(run_quote_lit(buf, "a'a", 16), "'a''a'"); str_check(run_quote_lit(buf, "a\\a", 16), "E'a\\\\a'"); str_check(run_quote_lit(buf, "", 3), "''"); str_check(run_quote_lit(buf, "", 2), "FAIL"); str_check(run_quote_lit(buf, "", 1), "FAIL"); str_check(run_quote_lit(buf, "", 0), "FAIL"); str_check(run_quote_lit(buf, "a'a", 7), "'a''a'"); str_check(run_quote_lit(buf, "a'a", 6), "FAIL"); str_check(run_quote_lit(buf, "a\\a", 8), "E'a\\\\a'"); str_check(run_quote_lit(buf, "a\\a", 7), "FAIL"); str_check(run_quote_lit(buf, "a", 4), "'a'"); str_check(run_quote_lit(buf, "a", 3), "FAIL"); end:; } /* * quote_ident */ static char *qident(char *dst, const char *src, int size) { if (pg_quote_ident(dst, src, size)) return dst; return "FAIL"; } static void test_quote_ident(void *ptr) { char buf[128]; str_check(qident(buf, "", 16), "\"\""); str_check(qident(buf, "id_", 16), "id_"); str_check(qident(buf, "_id", 16), "_id"); str_check(qident(buf, "Baz", 16), "\"Baz\""); str_check(qident(buf, "baZ", 16), "\"baZ\""); str_check(qident(buf, "b z", 16), "\"b z\""); str_check(qident(buf, "5id", 16), "\"5id\""); str_check(qident(buf, "\"", 16), "\"\"\"\""); str_check(qident(buf, "a\"b", 16), "\"a\"\"b\""); str_check(qident(buf, "WHERE", 16), "\"WHERE\""); str_check(qident(buf, "where", 16), "\"where\""); str_check(qident(buf, "here", 16), "here"); str_check(qident(buf, "in", 16), "\"in\""); str_check(qident(buf, "", 3), "\"\""); str_check(qident(buf, "", 2), "FAIL"); str_check(qident(buf, "", 1), "FAIL"); 
str_check(qident(buf, "", 0), "FAIL"); str_check(qident(buf, "i", 2), "i"); str_check(qident(buf, "i", 1), "FAIL"); str_check(qident(buf, "i", 0), "FAIL"); str_check(qident(buf, "a\"b", 7), "\"a\"\"b\""); str_check(qident(buf, "a\"b", 6), "FAIL"); str_check(qident(buf, "a\"b", 5), "FAIL"); str_check(qident(buf, "a\"b", 4), "FAIL"); str_check(qident(buf, "a\"b", 3), "FAIL"); end:; } /* * quote_fqident */ static char *fqident(char *dst, const char *src, int size) { if (pg_quote_fqident(dst, src, size)) return dst; return "FAIL"; } static void test_quote_fqident(void *ptr) { char buf[128]; str_check(fqident(buf, "", 16), "public.\"\""); str_check(fqident(buf, "baz.foo", 16), "baz.foo"); str_check(fqident(buf, "baz.foo.bar", 16), "baz.\"foo.bar\""); str_check(fqident(buf, "where.in", 16), "\"where\".\"in\""); str_check(fqident(buf, "a.b", 4), "a.b"); str_check(fqident(buf, "a.b", 3), "FAIL"); str_check(fqident(buf, "a.b", 1), "FAIL"); str_check(fqident(buf, "a.b", 0), "FAIL"); str_check(fqident(buf, "i", 9), "public.i"); str_check(fqident(buf, "i", 8), "FAIL"); end:; } /* * pg_parse_array */ static char *aparse(const char *src) { struct StrList *sl = pg_parse_array(src, USUAL_ALLOC); static char buf[1024]; char *dst = buf; const char *s; int len; bool first = true; if (!sl) return "FAIL"; while (!strlist_empty(sl)) { if (first) first = false; else *dst++ = ':'; s = strlist_pop(sl); if (!s) { strcpy(dst, "NULL"); dst += 4; } else { len = strlen(s); memcpy(dst, s, len); free(s); dst += len; } } *dst = 0; strlist_free(sl); return buf; } static void test_parse_array(void *ptr) { str_check(aparse("{a,b,c}"), "a:b:c"); str_check(aparse("{a}"), "a"); str_check(aparse("{}"), ""); str_check(aparse("{ a }"), "a"); str_check(aparse("{null}"), "NULL"); str_check(aparse("{ Null , NULL , nUlL }"), "NULL:NULL:NULL"); str_check(aparse("{ \"Null\" , \"NULL\" , \"nUlL\" }"), "Null:NULL:nUlL"); str_check(aparse("{ \"\",\"\",\"\" }"), "::"); str_check(aparse("{,}"), "FAIL"); 
str_check(aparse("{ a b c , d,e ,f}"), "a b c:d:e:f"); str_check(aparse("{ \" a b c \" , \",d,\"}"), " a b c :,d,"); str_check(aparse("[1,2]={7,8,9}"), "7:8:9"); str_check(aparse("[1,2.={7}"), "FAIL"); str_check(aparse("{ \\\" , \"\\\"\" }"), "\":\""); str_check(aparse("{ \\,,\\\\}"), ",:\\"); str_check(aparse("{\\}}"), "}"); str_check(aparse("{abc"), "FAIL"); str_check(aparse(""), "FAIL"); str_check(aparse("{\"abc}"), "FAIL"); str_check(aparse("{\\"), "FAIL"); str_check(aparse("{abc ,"), "FAIL"); end:; } /* * Describe */ struct testcase_t pgutil_tests[] = { { "pg_quote_literal", test_quote_lit }, { "pg_quote_ident", test_quote_ident }, { "pg_quote_fqident", test_quote_fqident }, { "pg_parse_array", test_parse_array }, END_OF_TESTCASES }; skytools-3.2.6/lib/test/test_hashtab.c0000644000000000000000000000413112166266754014657 0ustar #include #include #include #include "test_common.h" struct MyNode { int value; }; static int cf_size = 64; static int cf_ofs = 0; static int cf_cnt = 3 * 64; static int cf_mod = 13; static bool mycmp(const htab_val_t curval, const void *arg) { const struct MyNode *n1 = curval; const struct MyNode *n2 = arg; return n1->value == n2->value; } static struct MyNode *make_node(int v) { struct MyNode *n = malloc(sizeof(*n)); n->value = v; return n; } /* * checking operations */ static const char *my_insert(struct HashTab *htab, int value) { struct MyNode *my = make_node(value); void **p; int key = value % cf_mod; p = hashtab_lookup(htab, key, true, my); if (!p) return "FAIL"; if (*p) return "EXISTS?"; *p = my; return "OK"; } static const char *my_remove(struct HashTab *h, int value) { struct MyNode tmp, *my; void **p; int key = value % cf_mod; tmp.value = value; p = hashtab_lookup(h, key, false, &tmp); if (!p) return "NEXIST"; my = *p; if (my->value != value) return "WRONG"; hashtab_delete(h, key, &tmp); free(my); p = hashtab_lookup(h, key, false, &tmp); if (p) return "EXISTS?"; return "OK"; } static const char *my_lookup(struct HashTab *htab, 
int value) { void **p; struct MyNode tmp, *my; int key = value % cf_mod; tmp.value = value; p = hashtab_lookup(htab, key, false, &tmp); if (!p) return "NEXIST"; my = *p; if (my->value != value) return "WRONG"; return "OK"; } /* * Simple operations. */ static void test_hash_basic(void *p) { struct HashTab *htab; int i; htab = hashtab_create(cf_size, mycmp, USUAL_ALLOC); for (i = 0; i < cf_cnt; i++) { int n = i + cf_ofs; str_check(my_lookup(htab, n), "NEXIST"); str_check(my_insert(htab, n), "OK"); str_check(my_lookup(htab, n), "OK"); } for (i = 0; i < cf_cnt; i++) { int n = i + cf_ofs; str_check(my_lookup(htab, n), "OK"); str_check(my_remove(htab, n), "OK"); str_check(my_lookup(htab, n), "NEXIST"); } end: hashtab_destroy(htab); } struct testcase_t hashtab_tests[] = { { "basic", test_hash_basic }, END_OF_TESTCASES }; skytools-3.2.6/lib/test/test_bits.c0000644000000000000000000000410612166266754014210 0ustar #include #include "test_common.h" /* * is_power_of_2 */ static void test_pow2(void *p) { int_check(is_power_of_2(0), 0); int_check(is_power_of_2(1), 1); int_check(is_power_of_2(2), 1); int_check(is_power_of_2(3), 0); end:; } /* * rol */ static void test_rol(void *p) { /* rol16 */ int_check(rol16(1, 1), 2); int_check(rol16(1, 15), 32768); int_check(rol16(0x8000, 1), 1); /* rol32 */ int_check(rol32(1, 1), 2); int_check(rol32(0x80000000, 1), 1); /* rol64 */ ull_check(rol64(1, 1), 2); ull_check(rol64(1, 63), 0x8000000000000000ULL); end:; } /* * ror */ static void test_ror(void *p) { /* ror16 */ int_check(ror16(1, 1), 0x8000); /* ror32 */ int_check(ror32(1, 1), 0x80000000); /* ror64 */ ull_check(ror64(1, 1), 0x8000000000000000ULL); end:; } /* * fls */ static void test_fls(void *p) { /* fls */ int_check(fls(0), 0); int_check(fls(1), 1); int_check(fls(3), 2); int_check(fls((int)-1), 32); /* flsl */ int_check(flsl(0), 0); int_check(flsl(1), 1); int_check(flsl(3), 2); if (sizeof(long) == 4) int_check(flsl((long)-1), 32); else int_check(flsl((long)-1), 64); /* flsll */ 
int_check(flsll(0), 0); int_check(flsll(1), 1); int_check(flsll(3), 2); int_check(flsll((long long)-1), 64); end:; } /* * ffs */ static void test_ffs(void *p) { /* ffs */ int_check(ffs(0), 0); int_check(ffs(1), 1); int_check(ffs(3), 1); int_check(ffs((int)-1), 1); int_check(ffs(ror32(1,1)), 32); /* flsl */ int_check(ffsl(0), 0); int_check(ffsl(1), 1); int_check(ffsl(3), 1); int_check(ffsl((long)-1), 1); if (sizeof(long) == 4) int_check(ffsl(ror32(1,1)), 32); else int_check(ffsl(ror64(1,1)), 64); /* ffsll */ int_check(ffsll(0), 0); int_check(ffsll(1), 1); int_check(ffsll(3), 1); int_check(ffsll((long long)-1), 1); ull_check((1ULL << 63), ror64(1,1)); int_check(ffsll(1ULL << 63), 64); int_check(ffsll(ror64(1,1)), 64); end:; } /* * Describe */ struct testcase_t bits_tests[] = { { "is_power_of_2", test_pow2 }, { "rol", test_rol }, { "ror", test_ror }, { "ffs", test_ffs }, { "fls", test_fls }, END_OF_TESTCASES }; skytools-3.2.6/lib/test/test_heap.c0000644000000000000000000001013612166266754014164 0ustar #include #include #include "test_common.h" struct MyNode { int value; unsigned heap_idx; }; /* min-heap */ static bool heap_is_better(const void *a, const void *b) { const struct MyNode *aa = a, *bb = b; return (aa->value < bb->value); } static void my_save_pos(void *p, unsigned i) { struct MyNode *node = p; node->heap_idx = i; } static char *OK = "OK"; static struct MyNode *make_node(int v) { struct MyNode *n = malloc(sizeof(*n)); n->value = v; n->heap_idx = -1; return n; } static unsigned _heap_get_child(unsigned i, unsigned child_nr) { return 2*i + 1 + child_nr; } static bool _heap_is_better(struct Heap *h, unsigned i1, unsigned i2) { return heap_is_better(heap_get_obj(h, i1), heap_get_obj(h, i2)); } /* * Test tree sanity */ static const char *mkerr(const char *msg, unsigned idx, int val) { static char buf[128]; snprintf(buf, sizeof(buf), "%s: idx=%d curval=%d", msg, idx, val); return buf; } static const char *check_sub(struct Heap *heap, unsigned idx, int i) { 
unsigned c0 = _heap_get_child(idx, 0); unsigned c1 = _heap_get_child(idx, 1); struct MyNode *n; const char *res; unsigned used = heap_size(heap); if (idx >= used) return OK; n = heap_get_obj(heap, idx); if (n->heap_idx != idx) return mkerr("wrong saved idx", idx, i); if (c0 < used && _heap_is_better(heap, c0, idx)) return mkerr("c0 wrong order", idx, i); if (c1 < used && _heap_is_better(heap, c1, idx)) return mkerr("c1 wrong order", idx, i); res = check_sub(heap, c0, i); if (res == OK) res = check_sub(heap, c1, i); return res; } static const char *check(struct Heap *heap, int i) { return check_sub(heap, 0, i); } /* * checking operations */ static const char *my_insert(struct Heap *heap, int value) { struct MyNode *my = make_node(value); if (!heap_push(heap, my)) return "FAIL"; return check(heap, value); } static const char *my_remove(struct Heap *h, unsigned idx) { struct MyNode *n; if (idx >= heap_size(h)) return "NEXIST"; n = heap_get_obj(h, idx); heap_remove(h, idx); free(n); return check(h, 0); } static const char *my_clean(struct Heap *heap) { const char *res; while (heap_size(heap) > 0) { res = my_remove(heap, 0); if (res != OK) return res; } return OK; } /* * Simple operations. 
*/ static void test_heap_basic(void *p) { struct Heap *heap; int i; heap = heap_create(heap_is_better, my_save_pos, USUAL_ALLOC); str_check(my_remove(heap, 0), "NEXIST"); str_check(my_insert(heap, 0), "OK"); str_check(my_remove(heap, 0), "OK"); for (i = 0; i < 15; i++) { str_check(my_insert(heap, i), "OK"); } str_check(my_clean(heap), "OK"); for (i = -1; i > -15; i--) { str_check(my_insert(heap, i), "OK"); } str_check(my_clean(heap), "OK"); for (i = 30; i < 45; i++) { str_check(my_insert(heap, i), "OK"); } str_check(my_clean(heap), "OK"); for (i = 15; i < 30; i++) { str_check(my_insert(heap, i), "OK"); } str_check(my_clean(heap), "OK"); end: heap_destroy(heap); } #if 0 /* * randomized test */ #define RSIZE 3000 static int get_next(bool with_stat, bool added[]) { int r = random() % RSIZE; int i = r; while (1) { if (added[i] == with_stat) return i; if (++i >= RSIZE) i = 0; if (i == r) return -1; } } static void test_aatree_random(void *p) { bool is_added[RSIZE]; int prefer_remove = 0; /* 0 - insert, 1 - delete */ int n; int op; /* 0 - insert, 1 - delete */ struct AATree tree[1]; unsigned long long total = 0; srandom(123123); memset(is_added, 0, sizeof(is_added)); aatree_init(tree, my_node_cmp, my_node_free); while (total < 100000) { int r = random() & 15; if (prefer_remove) op = r > 5; else op = r > 10; /* op = 0; */ n = get_next(op, is_added); if (n < 0) { if (prefer_remove == op) { prefer_remove = !prefer_remove; } continue; } if (op == 0) { str_check(my_insert(tree, n), "OK"); is_added[n] = 1; } else { str_check(my_remove(tree, n), "OK"); is_added[n] = 0; } total++; } end: aatree_destroy(tree); } #endif struct testcase_t heap_tests[] = { { "basic", test_heap_basic }, /* { "random", test_aatree_random }, */ END_OF_TESTCASES }; skytools-3.2.6/lib/test/test_wchar.c0000644000000000000000000000514512166266754014357 0ustar #include #include #include "test_common.h" /* * mbstr_decode() */ static const char *decode(const char *s, int inbuf) { static char out[128]; wchar_t 
tmp[128]; wchar_t *res; int reslen = 4; unsigned i; for (i = 0; i < 128; i++) tmp[i] = '~'; res = mbstr_decode(s, inbuf, &reslen, tmp, sizeof(tmp), true); if (res == NULL) { if (errno == EILSEQ) return "EILSEQ"; if (errno == ENOMEM) return "ENOMEM"; return "NULL??"; } if (res != tmp) return "EBUF"; if (res[reslen] == 0) res[reslen] = 'Z'; else return "reslen fail?"; for (i = 0; i < 128; i++) { out[i] = tmp[i]; if (out[i] == '~') { out[i+1] = 0; break; } else if (out[i] == 0) { out[i] = '#'; } else if (tmp[i] > 127) { out[i] = 'A' + tmp[i] % 26; } } return out; } static void test_mbstr_decode(void *p) { str_check(decode("", 0), "Z~"); str_check(decode("", 1), "Z~"); str_check(decode("a", 0), "Z~"); str_check(decode("abc", 0), "Z~"); str_check(decode("abc", 1), "aZ~"); str_check(decode("abc", 2), "abZ~"); str_check(decode("abc", 3), "abcZ~"); str_check(decode("abc", 4), "abcZ~"); str_check(decode("abc", 5), "abcZ~"); if (MB_CUR_MAX > 1) { str_check(decode("aa\200cc", 5), "aaYccZ~"); str_check(decode("a\200cc", 5), "aYccZ~"); str_check(decode("aa\200c", 5), "aaYcZ~"); } end:; } /* * mbsnrtowcs() */ static const char *mbsnr(const char *str, int inbuf, int outbuf) { static char out[128]; wchar_t tmp[128]; int res; unsigned i; const char *s = str; mbstate_t ps; for (i = 0; i < 128; i++) tmp[i] = '~'; memset(&ps, 0, sizeof(ps)); res = mbsnrtowcs(tmp, &s, inbuf, outbuf, &ps); if (res < 0) { if (errno == EILSEQ) { snprintf(out, sizeof(out), "EILSEQ(%d)", (int)(s - str)); return out; } return "unknown error"; } if (tmp[res] == 0) tmp[res] = s ? 
'z' : 'Z'; for (i = 0; i < 128; i++) { out[i] = tmp[i]; if (out[i] == '~') { out[i+1] = 0; break; } } return out; } static void test_mbsnrtowcs(void *p) { str_check(mbsnr("", 1, 1), "Z~"); str_check(mbsnr("", 0, 0), "~"); str_check(mbsnr("", 0, 1), "~"); /* XXX */ str_check(mbsnr("", 1, 0), "~"); str_check(mbsnr("x", 1, 1), "x~"); str_check(mbsnr("x", 0, 0), "~"); str_check(mbsnr("x", 0, 1), "~"); /* XXX */ str_check(mbsnr("x", 1, 0), "~"); str_check(mbsnr("abc", 3, 3), "abc~"); str_check(mbsnr("abc", 3, 4), "abc~"); /* XXX */ str_check(mbsnr("abc", 4, 3), "abc~"); str_check(mbsnr("abc", 4, 4), "abcZ~"); end:; } /* * Describe */ struct testcase_t wchar_tests[] = { { "mbsnrtowcs", test_mbsnrtowcs }, { "mbstr_decode", test_mbstr_decode }, END_OF_TESTCASES }; skytools-3.2.6/lib/test/test_ctype.c0000644000000000000000000000204412166266754014372 0ustar #include #include #include "test_common.h" #include /* * if char works */ static void test_ctype_char(void *p) { int c, cx; for (c = 0; c < 256; c++) { cx = (int)(char)c; int_check(isalnum(c), isalnum(cx)); int_check(isalpha(c), isalpha(cx)); int_check(isascii(c), isascii(cx)); int_check(isblank(c), isblank(cx)); int_check(iscntrl(c), iscntrl(cx)); int_check(isdigit(c), isdigit(cx)); int_check(islower(c), islower(cx)); int_check(isgraph(c), isgraph(cx)); int_check(isprint(c), isprint(cx)); int_check(ispunct(c), ispunct(cx)); int_check(isspace(c), isspace(cx)); int_check(isupper(c), isupper(cx)); int_check(isxdigit(c), isxdigit(cx)); if (c == 255) { int_check(toupper(c), (unsigned char)toupper(cx)); int_check(tolower(c), (unsigned char)tolower(cx)); } else { int_check(toupper(c), toupper(cx)); int_check(tolower(c), tolower(cx)); } } end:; } /* * Describe */ struct testcase_t ctype_tests[] = { { "ctype_char", test_ctype_char }, END_OF_TESTCASES }; skytools-3.2.6/lib/test/tinytest_demo.c0000644000000000000000000001572212166266754015105 0ustar /* tinytest_demo.c -- Copyright 2009 Nick Mathewson * * Redistribution and use in 
source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Welcome to the example file for tinytest! I'll show you how to set up * some simple and not-so-simple testcases. */ /* Make sure you include these headers. */ #include "tinytest.h" #include "tinytest_macros.h" #include #include #include #include /* ============================================================ */ /* First, let's see if strcmp is working. (All your test cases should be * functions declared to take a single void * as) an argument. */ void test_strcmp(void *data) { (void)data; /* This testcase takes no data. 
*/ /* Let's make sure the empty string is equal to itself */ if (strcmp("","")) { /* This macro tells tinytest to stop the current test * and go straight to the "end" label. */ tt_abort_msg("The empty string was not equal to itself"); } /* Pretty often, calling tt_abort_msg to indicate failure is more heavy-weight than you want. Instead, just say: */ tt_assert(strcmp("testcase", "testcase") == 0); /* Occasionally, you don't want to stop the current testcase just because a single assertion has failed. In that case, use tt_want: */ tt_want(strcmp("tinytest", "testcase") > 0); /* You can use the tt_*_op family of macros to compare values and to fail unless they have the relationship you want. They produce more useful output than tt_assert, since they display the actual values of the failing things. Fail unless strcmp("abc, "abc") == 0 */ tt_int_op(strcmp("abc", "abc"), ==, 0); /* Fail unless strcmp("abc, "abcd") is less than 0 */ tt_int_op(strcmp("abc", "abcd"), < , 0); /* Incidentally, there's a test_str_op that uses strcmp internally. */ tt_str_op("abc", <, "abcd"); /* Every test-case function needs to finish with an "end:" label and (optionally) code to clean up local variables. */ end: ; } /* ============================================================ */ /* Now let's mess with setup and teardown functions! These are handy if you have a bunch of tests that all need a similar environment, and you wnat to reconstruct that environment freshly for each one. */ /* First you declare a type to hold the environment info, and functions to set it up and tear it down. */ struct data_buffer { /* We're just going to have couple of character buffer. Using setup/teardown functions is probably overkill for this case. You could also do file descriptors, complicated handles, temporary files, etc. 
*/ char buffer1[512]; char buffer2[512]; }; /* The setup function needs to take a const struct testcase_t and return void* */ void * setup_data_buffer(const struct testcase_t *testcase) { struct data_buffer *db = malloc(sizeof(struct data_buffer)); /* If you had a complicated set of setup rules, you might behave differently here depending on testcase->flags or testcase->setup_data or even or testcase->name. */ /* Returning a NULL here would mean that we couldn't set up for this test, so we don't need to test db for null. */ return db; } /* The clean function deallocates storage carefully and returns true on success. */ int clean_data_buffer(const struct testcase_t *testcase, void *ptr) { struct data_buffer *db = ptr; if (db) { free(db); return 1; } return 0; } /* Finally, declare a testcase_setup_t with these functions. */ struct testcase_setup_t data_buffer_setup = { setup_data_buffer, clean_data_buffer }; /* Now let's write our test. */ void test_memcpy(void *ptr) { /* This time, we use the argument. */ struct data_buffer *db = ptr; /* We'll also introduce a local variable that might need cleaning up. */ char *mem = NULL; /* Let's make sure that memcpy does what we'd like. */ strcpy(db->buffer1, "String 0"); memcpy(db->buffer2, db->buffer1, sizeof(db->buffer1)); tt_str_op(db->buffer1, ==, db->buffer2); /* Now we've allocated memory that's referenced by a local variable. The end block of the function will clean it up. */ mem = strdup("Hello world."); tt_assert(mem); /* Another rather trivial test. */ tt_str_op(db->buffer1, !=, mem); end: /* This time our end block has something to do. */ if (mem) free(mem); } /* ============================================================ */ /* Now we need to make sure that our tests get invoked. First, you take a bunch of related tests and put them into an array of struct testcase_t. */ struct testcase_t demo_tests[] = { /* Here's a really simple test: it has a name you can refer to it with, and a function to invoke it. 
*/ { "strcmp", test_strcmp, }, /* The second test has a flag, "TT_FORK", to make it run in a subprocess, and a pointer to the testcase_setup_t that configures its environment. */ { "memcpy", test_memcpy, TT_FORK, &data_buffer_setup }, /* The array has to end with END_OF_TESTCASES. */ END_OF_TESTCASES }; /* Next, we make an array of testgroups. This is mandatory. Unlike more heavy-duty testing frameworks, groups can't next. */ struct testgroup_t groups[] = { /* Every group has a 'prefix', and an array of tests. That's it. */ { "demo/", demo_tests }, END_OF_GROUPS }; int main(int c, const char **v) { /* Finally, just call tinytest_main(). It lets you specify verbose or quiet output with --verbose and --quiet. You can list specific tests: tinytest-demo demo/memcpy or use a ..-wildcard to select multiple tests with a common prefix: tinytest-demo demo/.. If you list no tests, you get them all by default, so that "tinytest-demo" and "tinytest-demo .." mean the same thing. */ return tinytest_main(c, v, groups); } skytools-3.2.6/lib/test/awk_test.sh0000755000000000000000000000135412166266754014226 0ustar #! /bin/sh # test find_modules.sh vs. various awks # random awks that may be around awk_list="mawk gawk nawk oawk" awk_list="$awk_list heirloom-nawk heirloom-oawk" awk_list="$awk_list original-awk plan9-awk" fmod=../find_modules.sh dir=fmod_test usual_dir=.. rm -rf $dir mkdir $dir ok=1 for f in *.c; do printf "$f .. 
" # write reference with default 'awk' ref=$dir/$f.awk $fmod $usual_dir $f > $ref 2>&1 for a in $awk_list; do which $a > /dev/null || continue printf "$a " out=$dir/$f.$a AWK=$a \ $fmod $usual_dir $f > $out 2>&1 cmp -s $ref $out || { printf "(FAIL) " ok=0 } done echo "" done if test $ok = 1; then echo "All OK" else echo "FAIL: not all tests passed" exit 1 fi skytools-3.2.6/lib/test/attregex/0000755000000000000000000000000012166266754013666 5ustar skytools-3.2.6/lib/test/attregex/data/0000755000000000000000000000000012166266754014577 5ustar skytools-3.2.6/lib/test/attregex/data/nullsubexpr.dat0000644000000000000000000000353212166266754017657 0ustar NOTE null subexpression matches : 2002-06-06 E (a*)* a (0,1)(0,1) E SAME x (0,0)(0,0) E SAME aaaaaa (0,6)(0,6) E SAME aaaaaax (0,6)(0,6) E (a*)+ a (0,1)(0,1) E SAME x (0,0)(0,0) E SAME aaaaaa (0,6)(0,6) E SAME aaaaaax (0,6)(0,6) E (a+)* a (0,1)(0,1) E SAME x (0,0) E SAME aaaaaa (0,6)(0,6) E SAME aaaaaax (0,6)(0,6) E (a+)+ a (0,1)(0,1) E SAME x NOMATCH E SAME aaaaaa (0,6)(0,6) E SAME aaaaaax (0,6)(0,6) E ([a]*)* a (0,1)(0,1) E SAME x (0,0)(0,0) E SAME aaaaaa (0,6)(0,6) E SAME aaaaaax (0,6)(0,6) E ([a]*)+ a (0,1)(0,1) E SAME x (0,0)(0,0) E SAME aaaaaa (0,6)(0,6) E SAME aaaaaax (0,6)(0,6) E ([^b]*)* a (0,1)(0,1) E SAME b (0,0)(0,0) E SAME aaaaaa (0,6)(0,6) E SAME aaaaaab (0,6)(0,6) E ([ab]*)* a (0,1)(0,1) E SAME aaaaaa (0,6)(0,6) E SAME ababab (0,6)(0,6) E SAME bababa (0,6)(0,6) E SAME b (0,1)(0,1) E SAME bbbbbb (0,6)(0,6) E SAME aaaabcde (0,5)(0,5) E ([^a]*)* b (0,1)(0,1) E SAME bbbbbb (0,6)(0,6) E SAME aaaaaa (0,0)(0,0) E ([^ab]*)* ccccxx (0,6)(0,6) E SAME ababab (0,0)(0,0) E ((z)+|a)* zabcde (0,2)(1,2) {E a+? aaaaaa (0,1) no *? +? mimimal match ops E (a) aaa (0,1)(0,1) E (a*?) aaa (0,0)(0,0) E (a)*? aaa (0,0) E (a*?)*? 
aaa (0,0) } B \(a*\)*\(x\) x (0,1)(0,0)(0,1) B \(a*\)*\(x\) ax (0,2)(0,1)(1,2) B \(a*\)*\(x\) axa (0,2)(0,1)(1,2) B \(a*\)*\(x\)\(\1\) x (0,1)(0,0)(0,1)(1,1) B \(a*\)*\(x\)\(\1\) ax (0,2)(1,1)(1,2)(2,2) B \(a*\)*\(x\)\(\1\) axa (0,3)(0,1)(1,2)(2,3) B \(a*\)*\(x\)\(\1\)\(x\) axax (0,4)(0,1)(1,2)(2,3)(3,4) B \(a*\)*\(x\)\(\1\)\(x\) axxa (0,3)(1,1)(1,2)(2,2)(2,3) E (a*)*(x) x (0,1)(0,0)(0,1) E (a*)*(x) ax (0,2)(0,1)(1,2) E (a*)*(x) axa (0,2)(0,1)(1,2) E (a*)+(x) x (0,1)(0,0)(0,1) E (a*)+(x) ax (0,2)(0,1)(1,2) E (a*)+(x) axa (0,2)(0,1)(1,2) E (a*){2}(x) x (0,1)(0,0)(0,1) E (a*){2}(x) ax (0,2)(1,1)(1,2) E (a*){2}(x) axa (0,2)(1,1)(1,2) skytools-3.2.6/lib/test/attregex/data/repetition.dat0000644000000000000000000001254012166266754017455 0ustar NOTE implicit vs. explicit repetitions : 2009-02-02 # Glenn Fowler # conforming matches (column 4) must match one of the following BREs # NOMATCH # (0,.)\((\(.\),\(.\))(?,?)(\2,\3)\)* # (0,.)\((\(.\),\(.\))(\2,\3)(?,?)\)* # i.e., each 3-tuple has two identical elements and one (?,?) E ((..)|(.)) NULL NOMATCH E ((..)|(.))((..)|(.)) NULL NOMATCH E ((..)|(.))((..)|(.))((..)|(.)) NULL NOMATCH E ((..)|(.)){1} NULL NOMATCH E ((..)|(.)){2} NULL NOMATCH E ((..)|(.)){3} NULL NOMATCH E ((..)|(.))* NULL (0,0) E ((..)|(.)) a (0,1)(0,1)(?,?)(0,1) E ((..)|(.))((..)|(.)) a NOMATCH E ((..)|(.))((..)|(.))((..)|(.)) a NOMATCH E ((..)|(.)){1} a (0,1)(0,1)(?,?)(0,1) E ((..)|(.)){2} a NOMATCH E ((..)|(.)){3} a NOMATCH E ((..)|(.))* a (0,1)(0,1)(?,?)(0,1) E ((..)|(.)) aa (0,2)(0,2)(0,2)(?,?) E ((..)|(.))((..)|(.)) aa (0,2)(0,1)(?,?)(0,1)(1,2)(?,?)(1,2) E ((..)|(.))((..)|(.))((..)|(.)) aa NOMATCH E ((..)|(.)){1} aa (0,2)(0,2)(0,2)(?,?) E ((..)|(.)){2} aa (0,2)(1,2)(?,?)(1,2) E ((..)|(.)){3} aa NOMATCH E ((..)|(.))* aa (0,2)(0,2)(0,2)(?,?) E ((..)|(.)) aaa (0,2)(0,2)(0,2)(?,?) 
E ((..)|(.))((..)|(.)) aaa (0,3)(0,2)(0,2)(?,?)(2,3)(?,?)(2,3) E ((..)|(.))((..)|(.))((..)|(.)) aaa (0,3)(0,1)(?,?)(0,1)(1,2)(?,?)(1,2)(2,3)(?,?)(2,3) E ((..)|(.)){1} aaa (0,2)(0,2)(0,2)(?,?) E ((..)|(.)){2} aaa (0,3)(2,3)(?,?)(2,3) E ((..)|(.)){3} aaa (0,3)(2,3)(?,?)(2,3) E ((..)|(.))* aaa (0,3)(2,3)(?,?)(2,3) E ((..)|(.)) aaaa (0,2)(0,2)(0,2)(?,?) E ((..)|(.))((..)|(.)) aaaa (0,4)(0,2)(0,2)(?,?)(2,4)(2,4)(?,?) E ((..)|(.))((..)|(.))((..)|(.)) aaaa (0,4)(0,2)(0,2)(?,?)(2,3)(?,?)(2,3)(3,4)(?,?)(3,4) E ((..)|(.)){1} aaaa (0,2)(0,2)(0,2)(?,?) E ((..)|(.)){2} aaaa (0,4)(2,4)(2,4)(?,?) E ((..)|(.)){3} aaaa (0,4)(3,4)(?,?)(3,4) E ((..)|(.))* aaaa (0,4)(2,4)(2,4)(?,?) E ((..)|(.)) aaaaa (0,2)(0,2)(0,2)(?,?) E ((..)|(.))((..)|(.)) aaaaa (0,4)(0,2)(0,2)(?,?)(2,4)(2,4)(?,?) E ((..)|(.))((..)|(.))((..)|(.)) aaaaa (0,5)(0,2)(0,2)(?,?)(2,4)(2,4)(?,?)(4,5)(?,?)(4,5) E ((..)|(.)){1} aaaaa (0,2)(0,2)(0,2)(?,?) E ((..)|(.)){2} aaaaa (0,4)(2,4)(2,4)(?,?) E ((..)|(.)){3} aaaaa (0,5)(4,5)(?,?)(4,5) E ((..)|(.))* aaaaa (0,5)(4,5)(?,?)(4,5) E ((..)|(.)) aaaaaa (0,2)(0,2)(0,2)(?,?) E ((..)|(.))((..)|(.)) aaaaaa (0,4)(0,2)(0,2)(?,?)(2,4)(2,4)(?,?) E ((..)|(.))((..)|(.))((..)|(.)) aaaaaa (0,6)(0,2)(0,2)(?,?)(2,4)(2,4)(?,?)(4,6)(4,6)(?,?) E ((..)|(.)){1} aaaaaa (0,2)(0,2)(0,2)(?,?) E ((..)|(.)){2} aaaaaa (0,4)(2,4)(2,4)(?,?) E ((..)|(.)){3} aaaaaa (0,6)(4,6)(4,6)(?,?) E ((..)|(.))* aaaaaa (0,6)(4,6)(4,6)(?,?) NOTE additional repetition tests graciously provided by Chris Kuklewicz www.haskell.org 2009-02-02 # These test a bug in OS X / FreeBSD / NetBSD, and libtree. # Linux/GLIBC gets the {8,} and {8,8} wrong. 
:HA#100:E X(.?){0,}Y X1234567Y (0,9)(7,8) :HA#101:E X(.?){1,}Y X1234567Y (0,9)(7,8) :HA#102:E X(.?){2,}Y X1234567Y (0,9)(7,8) :HA#103:E X(.?){3,}Y X1234567Y (0,9)(7,8) :HA#104:E X(.?){4,}Y X1234567Y (0,9)(7,8) :HA#105:E X(.?){5,}Y X1234567Y (0,9)(7,8) :HA#106:E X(.?){6,}Y X1234567Y (0,9)(7,8) :HA#107:E X(.?){7,}Y X1234567Y (0,9)(7,8) :HA#108:E X(.?){8,}Y X1234567Y (0,9)(8,8) :HA#110:E X(.?){0,8}Y X1234567Y (0,9)(7,8) :HA#111:E X(.?){1,8}Y X1234567Y (0,9)(7,8) :HA#112:E X(.?){2,8}Y X1234567Y (0,9)(7,8) :HA#113:E X(.?){3,8}Y X1234567Y (0,9)(7,8) :HA#114:E X(.?){4,8}Y X1234567Y (0,9)(7,8) :HA#115:E X(.?){5,8}Y X1234567Y (0,9)(7,8) :HA#116:E X(.?){6,8}Y X1234567Y (0,9)(7,8) :HA#117:E X(.?){7,8}Y X1234567Y (0,9)(7,8) :HA#118:E X(.?){8,8}Y X1234567Y (0,9)(8,8) # These test a fixed bug in my regex-tdfa that did not keep the expanded # form properly grouped, so right association did the wrong thing with # these ambiguous patterns (crafted just to test my code when I became # suspicious of my implementation). The first subexpression should use # "ab" then "a" then "bcd". # OS X / FreeBSD / NetBSD badly fail many of these, with impossible # results like (0,6)(4,5)(6,6). :HA#260:E (a|ab|c|bcd){0,}(d*) ababcd (0,6)(3,6)(6,6) :HA#261:E (a|ab|c|bcd){1,}(d*) ababcd (0,6)(3,6)(6,6) :HA#262:E (a|ab|c|bcd){2,}(d*) ababcd (0,6)(3,6)(6,6) :HA#263:E (a|ab|c|bcd){3,}(d*) ababcd (0,6)(3,6)(6,6) :HA#264:E (a|ab|c|bcd){4,}(d*) ababcd NOMATCH :HA#265:E (a|ab|c|bcd){0,10}(d*) ababcd (0,6)(3,6)(6,6) :HA#266:E (a|ab|c|bcd){1,10}(d*) ababcd (0,6)(3,6)(6,6) :HA#267:E (a|ab|c|bcd){2,10}(d*) ababcd (0,6)(3,6)(6,6) :HA#268:E (a|ab|c|bcd){3,10}(d*) ababcd (0,6)(3,6)(6,6) :HA#269:E (a|ab|c|bcd){4,10}(d*) ababcd NOMATCH :HA#270:E (a|ab|c|bcd)*(d*) ababcd (0,6)(3,6)(6,6) :HA#271:E (a|ab|c|bcd)+(d*) ababcd (0,6)(3,6)(6,6) # The above worked on Linux/GLIBC but the following often fail. 
# They also trip up OS X / FreeBSD / NetBSD: :HA#280:E (ab|a|c|bcd){0,}(d*) ababcd (0,6)(3,6)(6,6) :HA#281:E (ab|a|c|bcd){1,}(d*) ababcd (0,6)(3,6)(6,6) :HA#282:E (ab|a|c|bcd){2,}(d*) ababcd (0,6)(3,6)(6,6) :HA#283:E (ab|a|c|bcd){3,}(d*) ababcd (0,6)(3,6)(6,6) :HA#284:E (ab|a|c|bcd){4,}(d*) ababcd NOMATCH :HA#285:E (ab|a|c|bcd){0,10}(d*) ababcd (0,6)(3,6)(6,6) :HA#286:E (ab|a|c|bcd){1,10}(d*) ababcd (0,6)(3,6)(6,6) :HA#287:E (ab|a|c|bcd){2,10}(d*) ababcd (0,6)(3,6)(6,6) :HA#288:E (ab|a|c|bcd){3,10}(d*) ababcd (0,6)(3,6)(6,6) :HA#289:E (ab|a|c|bcd){4,10}(d*) ababcd NOMATCH :HA#290:E (ab|a|c|bcd)*(d*) ababcd (0,6)(3,6)(6,6) :HA#291:E (ab|a|c|bcd)+(d*) ababcd (0,6)(3,6)(6,6) skytools-3.2.6/lib/test/attregex/data/basic.dat0000644000000000000000000002057312166266754016361 0ustar NOTE all standard compliant implementations should pass these : 2002-05-31 BE abracadabra$ abracadabracadabra (7,18) BE a...b abababbb (2,7) BE XXXXXX ..XXXXXX (2,8) E \) () (1,2) BE a] a]a (0,2) B } } (0,1) E \} } (0,1) BE \] ] (0,1) B ] ] (0,1) E ] ] (0,1) B { { (0,1) B } } (0,1) BE ^a ax (0,1) BE \^a a^a (1,3) BE a\^ a^ (0,2) BE a$ aa (1,2) BE a\$ a$ (0,2) BE ^$ NULL (0,0) E $^ NULL (0,0) E a($) aa (1,2)(2,2) E a*(^a) aa (0,1)(0,1) E (..)*(...)* a (0,0) E (..)*(...)* abcd (0,4)(2,4) E (ab|a)(bc|c) abc (0,3)(0,2)(2,3) E (ab)c|abc abc (0,3)(0,2) E a{0}b ab (1,2) E (a*)(b?)(b+)b{3} aaabbbbbbb (0,10)(0,3)(3,4)(4,7) E (a*)(b{0,1})(b{1,})b{3} aaabbbbbbb (0,10)(0,3)(3,4)(4,7) E a{9876543210} NULL BADBR E ((a|a)|a) a (0,1)(0,1)(0,1) E (a*)(a|aa) aaaa (0,4)(0,3)(3,4) E a*(a.|aa) aaaa (0,4)(2,4) E a(b)|c(d)|a(e)f aef (0,3)(?,?)(?,?)(1,2) E (a|b)?.* b (0,1)(0,1) E (a|b)c|a(b|c) ac (0,2)(0,1) E (a|b)c|a(b|c) ab (0,2)(?,?)(1,2) E (a|b)*c|(a|ab)*c abc (0,3)(1,2) E (a|b)*c|(a|ab)*c xc (1,2) E (.a|.b).*|.*(.a|.b) xa (0,2)(0,2) E a?(ab|ba)ab abab (0,4)(0,2) E a?(ac{0}b|ba)ab abab (0,4)(0,2) E ab|abab abbabab (0,2) E aba|bab|bba baaabbbaba (5,8) E aba|bab baaabbbaba (6,9) E (aa|aaa)*|(a|aaaaa) aa (0,2)(0,2) E 
(a.|.a.)*|(a|.a...) aa (0,2)(0,2) E ab|a xabc (1,3) E ab|a xxabc (2,4) Ei (Ab|cD)* aBcD (0,4)(2,4) BE [^-] --a (2,3) BE [a-]* --a (0,3) BE [a-m-]* --amoma-- (0,4) E :::1:::0:|:::1:1:0: :::0:::1:::1:::0: (8,17) E :::1:::0:|:::1:1:1: :::0:::1:::1:::0: (8,17) {E [[:upper:]] A (0,1) [[]] not supported E [[:lower:]]+ `az{ (1,3) E [[:upper:]]+ @AZ[ (1,3) BE [[-]] [[-]] (2,4) BE [[.NIL.]] NULL ECOLLATE BE [[=aleph=]] NULL ECOLLATE } BE$ \n \n (0,1) BEn$ \n \n (0,1) BE$ [^a] \n (0,1) BE$ \na \na (0,2) E (a)(b)(c) abc (0,3)(0,1)(1,2)(2,3) BE xxx xxx (0,3) E1 (^|[ (,;])((([Ff]eb[^ ]* *|0*2/|\* */?)0*[6-7]))([^0-9]|$) feb 6, (0,6) E1 (^|[ (,;])((([Ff]eb[^ ]* *|0*2/|\* */?)0*[6-7]))([^0-9]|$) 2/7 (0,3) E1 (^|[ (,;])((([Ff]eb[^ ]* *|0*2/|\* */?)0*[6-7]))([^0-9]|$) feb 1,Feb 6 (5,11) E3 ((((((((((((((((((((((((((((((x)))))))))))))))))))))))))))))) x (0,1)(0,1)(0,1) E3 ((((((((((((((((((((((((((((((x))))))))))))))))))))))))))))))* xx (0,2)(1,2)(1,2) E a?(ab|ba)* ababababababababababababababababababababababababababababababababababababababababa (0,81)(79,81) E abaa|abbaa|abbbaa|abbbbaa ababbabbbabbbabbbbabbbbaa (18,25) E abaa|abbaa|abbbaa|abbbbaa ababbabbbabbbabbbbabaa (18,22) E aaac|aabc|abac|abbc|baac|babc|bbac|bbbc baaabbbabac (7,11) BE$ .* \x01\xff (0,2) E aaaa|bbbb|cccc|ddddd|eeeeee|fffffff|gggg|hhhh|iiiii|jjjjj|kkkkk|llll XaaaXbbbXcccXdddXeeeXfffXgggXhhhXiiiXjjjXkkkXlllXcbaXaaaa (53,57) L aaaa\nbbbb\ncccc\nddddd\neeeeee\nfffffff\ngggg\nhhhh\niiiii\njjjjj\nkkkkk\nllll XaaaXbbbXcccXdddXeeeXfffXgggXhhhXiiiXjjjXkkkXlllXcbaXaaaa NOMATCH E a*a*a*a*a*b aaaaaaaaab (0,10) BE ^ NULL (0,0) BE $ NULL (0,0) BE ^$ NULL (0,0) BE ^a$ a (0,1) BE abc abc (0,3) BE abc xabcy (1,4) BE abc ababc (2,5) BE ab*c abc (0,3) BE ab*bc abc (0,3) BE ab*bc abbc (0,4) BE ab*bc abbbbc (0,6) E ab+bc abbc (0,4) E ab+bc abbbbc (0,6) E ab?bc abbc (0,4) E ab?bc abc (0,3) E ab?c abc (0,3) BE ^abc$ abc (0,3) BE ^abc abcc (0,3) BE abc$ aabc (1,4) BE ^ abc (0,0) BE $ abc (3,3) BE a.c abc (0,3) BE a.c axc (0,3) BE a.*c 
axyzc (0,5) BE a[bc]d abd (0,3) BE a[b-d]e ace (0,3) BE a[b-d] aac (1,3) BE a[-b] a- (0,2) BE a[b-] a- (0,2) BE a] a] (0,2) BE a[]]b a]b (0,3) BE a[^bc]d aed (0,3) BE a[^-b]c adc (0,3) BE a[^]b]c adc (0,3) E ab|cd abc (0,2) E ab|cd abcd (0,2) E a\(b a(b (0,3) E a\(*b ab (0,2) E a\(*b a((b (0,4) E ((a)) abc (0,1)(0,1)(0,1) E (a)b(c) abc (0,3)(0,1)(2,3) E a+b+c aabbabc (4,7) E a* aaa (0,3) E (a*)* - (0,0)(0,0) E (a*)+ - (0,0)(0,0) E (a*|b)* - (0,0)(0,0) E (a+|b)* ab (0,2)(1,2) E (a+|b)+ ab (0,2)(1,2) E (a+|b)? ab (0,1)(0,1) BE [^ab]* cde (0,3) E (^)* - (0,0)(0,0) BE a* NULL (0,0) E ([abc])*d abbbcd (0,6)(4,5) E ([abc])*bcd abcd (0,4)(0,1) E a|b|c|d|e e (0,1) E (a|b|c|d|e)f ef (0,2)(0,1) E ((a*|b))* - (0,0)(0,0)(0,0) BE abcd*efg abcdefg (0,7) BE ab* xabyabbbz (1,3) BE ab* xayabbbz (1,2) E (ab|cd)e abcde (2,5)(2,4) BE [abhgefdc]ij hij (0,3) E (a|b)c*d abcd (1,4)(1,2) E (ab|ab*)bc abc (0,3)(0,1) E a([bc]*)c* abc (0,3)(1,3) E a([bc]*)(c*d) abcd (0,4)(1,3)(3,4) E a([bc]+)(c*d) abcd (0,4)(1,3)(3,4) E a([bc]*)(c+d) abcd (0,4)(1,2)(2,4) E a[bcd]*dcdcde adcdcde (0,7) E (ab|a)b*c abc (0,3)(0,2) E ((a)(b)c)(d) abcd (0,4)(0,3)(0,1)(1,2)(3,4) BE [A-Za-z_][A-Za-z0-9_]* alpha (0,5) E ^a(bc+|b[eh])g|.h$ abh (1,3) E (bc+d$|ef*g.|h?i(j|k)) effgz (0,5)(0,5) E (bc+d$|ef*g.|h?i(j|k)) ij (0,2)(0,2)(1,2) E (bc+d$|ef*g.|h?i(j|k)) reffgz (1,6)(1,6) E (((((((((a))))))))) a (0,1)(0,1)(0,1)(0,1)(0,1)(0,1)(0,1)(0,1)(0,1)(0,1) BE multiple words multiple words yeah (0,14) E (.*)c(.*) abcde (0,5)(0,2)(3,5) BE abcd abcd (0,4) E a(bc)d abcd (0,4)(1,3) E a[-]?c ac (0,3) E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Muammar Qaddafi (0,15)(?,?)(10,12) E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Mo'ammar Gadhafi (0,16)(?,?)(11,13) E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Muammar Kaddafi (0,15)(?,?)(10,12) E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Muammar Qadhafi (0,15)(?,?)(10,12) E M[ou]'?am+[ae]r 
.*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Muammar Gadafi (0,14)(?,?)(10,11) E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Mu'ammar Qadafi (0,15)(?,?)(11,12) E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Moamar Gaddafi (0,14)(?,?)(9,11) E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Mu'ammar Qadhdhafi (0,18)(?,?)(13,15) E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Muammar Khaddafi (0,16)(?,?)(11,13) E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Muammar Ghaddafy (0,16)(?,?)(11,13) E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Muammar Ghadafi (0,15)(?,?)(11,12) E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Muammar Ghaddafi (0,16)(?,?)(11,13) E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Muamar Kaddafi (0,14)(?,?)(9,11) E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Muammar Quathafi (0,16)(?,?)(11,13) E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Muammar Gheddafi (0,16)(?,?)(11,13) E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Moammar Khadafy (0,15)(?,?)(11,12) E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Moammar Qudhafi (0,15)(?,?)(10,12) E a+(b|c)*d+ aabcdd (0,6)(3,4) E ^.+$ vivi (0,4) E ^(.+)$ vivi (0,4)(0,4) E ^([^!.]+).att.com!(.+)$ gryphon.att.com!eby (0,19)(0,7)(16,19) E ^([^!]+!)?([^!]+)$ bas (0,3)(?,?)(0,3) E ^([^!]+!)?([^!]+)$ bar!bas (0,7)(0,4)(4,7) E ^([^!]+!)?([^!]+)$ foo!bas (0,7)(0,4)(4,7) E ^.+!([^!]+!)([^!]+)$ foo!bar!bas (0,11)(4,8)(8,11) E ((foo)|(bar))!bas bar!bas (0,7)(0,3)(?,?)(0,3) E ((foo)|(bar))!bas foo!bar!bas (4,11)(4,7)(?,?)(4,7) E ((foo)|(bar))!bas foo!bas (0,7)(0,3)(0,3) E ((foo)|bar)!bas bar!bas (0,7)(0,3) E ((foo)|bar)!bas foo!bar!bas (4,11)(4,7) E ((foo)|bar)!bas foo!bas (0,7)(0,3)(0,3) E (foo|(bar))!bas bar!bas (0,7)(0,3)(0,3) E (foo|(bar))!bas foo!bar!bas 
(4,11)(4,7)(4,7) E (foo|(bar))!bas foo!bas (0,7)(0,3) E (foo|bar)!bas bar!bas (0,7)(0,3) E (foo|bar)!bas foo!bar!bas (4,11)(4,7) E (foo|bar)!bas foo!bas (0,7)(0,3) E ^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$ foo!bar!bas (0,11)(0,11)(?,?)(?,?)(4,8)(8,11) E ^([^!]+!)?([^!]+)$|^.+!([^!]+!)([^!]+)$ bas (0,3)(?,?)(0,3) E ^([^!]+!)?([^!]+)$|^.+!([^!]+!)([^!]+)$ bar!bas (0,7)(0,4)(4,7) E ^([^!]+!)?([^!]+)$|^.+!([^!]+!)([^!]+)$ foo!bar!bas (0,11)(?,?)(?,?)(4,8)(8,11) E ^([^!]+!)?([^!]+)$|^.+!([^!]+!)([^!]+)$ foo!bas (0,7)(0,4)(4,7) E ^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$ bas (0,3)(0,3)(?,?)(0,3) E ^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$ bar!bas (0,7)(0,7)(0,4)(4,7) E ^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$ foo!bar!bas (0,11)(0,11)(?,?)(?,?)(4,8)(8,11) E ^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$ foo!bas (0,7)(0,7)(0,4)(4,7) E .*(/XXX).* /XXX (0,4)(0,4) E .*(\\XXX).* \XXX (0,4)(0,4) E \\XXX \XXX (0,4) E .*(/000).* /000 (0,4)(0,4) E .*(\\000).* \000 (0,4)(0,4) E \\000 \000 (0,4) skytools-3.2.6/lib/test/attregex/data/categorize.dat0000644000000000000000000000443012166266754017426 0ustar NOTE regex implementation categorization 2004-05-31 ?E aa* xaxaax (1,2) POSITION=leftmost ; POSITION=bug ?E (a*)(ab)*(b*) abc (0,2)(0,1)(?,?)(1,2) ASSOCIATIVITY=right |E (a*)(ab)*(b*) abc (0,2)(0,0)(0,2)(2,2) ASSOCIATIVITY=left ; ASSOCIATIVITY=bug ?E ((a*)(ab)*)((b*)(a*)) aba (0,3)(0,2)(0,0)(0,2)(2,3)(2,2)(2,3) SUBEXPRESSION=precedence |E ((a*)(ab)*)((b*)(a*)) aba (0,3)(0,1)(0,1)(?,?)(1,3)(1,2)(2,3) SUBEXPRESSION=grouping ; SUBEXPRESSION=bug ?E (...?.?)* xxxxxx (0,6)(4,6) REPEAT_LONGEST=first |E (...?.?)* xxxxxx (0,6)(2,6) REPEAT_LONGEST=last |E (...?.?)* xxxxxx OK REPEAT_LONGEST=unknown ; REPEAT_LONGEST=bug ?E (a|ab)(bc|c) abcabc (0,3)(0,2)(2,3) EXPECTED |E (a|ab)(bc|c) abcabc (0,3)(0,1)(1,3) BUG=alternation-order ; BUG=alternation-order-UNKNOWN ?E (aba|a*b)(aba|a*b) ababa (0,5)(0,2)(2,5) EXPECTED |E (aba|a*b)(aba|a*b) ababa (0,4)(0,3)(3,4) BUG=first-match ; BUG=unknown-match ?B a\(b\)*\1 a 
NOMATCH EXPECTED |B a\(b\)*\1 a (0,1) BUG=nomatch-match |B a\(b\)*\1 abab (0,2)(1,2) # BUG=repeat-any ; BUG=nomatch-match-UNKNOWN ?E (a*){2} xxxxx (0,0)(0,0) EXPECTED |E (a*){2} xxxxx (5,5)(5,5) BUG=range-null ; BUG=range-null-UNKNOWN ?B a\(b\)*\1 abab NOMATCH EXPECTED |B a\(b\)*\1 abab (0,1) # BUG=nomatch-match |B a\(b\)*\1 abab (0,2)(1,2) BUG=repeat-any ; BUG=repeat-any-UNKNOWN ?E (a*)* a (0,1)(0,1) EXPECTED |E (a*)* ax (0,1)(0,1) BUG=repeat-null-unknown |E (a*)* a (0,1)(1,1) BUG=repeat-null ; BUG=repeat-null-UNKNOWN ?E (aba|a*b)* ababa (0,5)(2,5) EXPECTED |E (aba|a*b)* ababa (0,5)(3,4) BUG=repeat-short |E (aba|a*b)* ababa (0,4)(3,4) # LENGTH=first ; BUG=repeat-short-UNKNOWN ?E (a(b)?)+ aba (0,3)(2,3) EXPECTED |E (a(b)?)+ aba (0,3)(2,3)(1,2) BUG=repeat-artifact ; BUG=repeat-artifact-UNKNOWN ?B \(a\(b\)*\)*\2 abab NOMATCH EXPECTED |B \(a\(b\)*\)*\2 abab (0,4)(2,3)(1,2) BUG=repeat-artifact-nomatch ; BUG=repeat-artifact-nomatch-UNKNOWN ?E (a?)((ab)?)(b?)a?(ab)?b? abab (0,4)(0,1)(1,1)(?,?)(1,2)(2,4) BUG=subexpression-first |E .*(.*) ab (0,2)(2,2) EXPECTED |E .*(.*) ab (0,2)(0,2) BUG=subexpression-first ; BUG=subexpression-first-UNKNOWN skytools-3.2.6/lib/test/attregex/data/forcedassoc.dat0000644000000000000000000000241412166266754017565 0ustar NOTE left-assoc:pass-all right-assoc:pass-all : 2002-04-29 E (a|ab)(c|bcd) abcd (0,4)(0,1)(1,4) E (a|ab)(bcd|c) abcd (0,4)(0,1)(1,4) E (ab|a)(c|bcd) abcd (0,4)(0,1)(1,4) E (ab|a)(bcd|c) abcd (0,4)(0,1)(1,4) E ((a|ab)(c|bcd))(d*) abcd (0,4)(0,4)(0,1)(1,4)(4,4) E ((a|ab)(bcd|c))(d*) abcd (0,4)(0,4)(0,1)(1,4)(4,4) E ((ab|a)(c|bcd))(d*) abcd (0,4)(0,4)(0,1)(1,4)(4,4) E ((ab|a)(bcd|c))(d*) abcd (0,4)(0,4)(0,1)(1,4)(4,4) E (a|ab)((c|bcd)(d*)) abcd (0,4)(0,2)(2,4)(2,3)(3,4) E (a|ab)((bcd|c)(d*)) abcd (0,4)(0,2)(2,4)(2,3)(3,4) E (ab|a)((c|bcd)(d*)) abcd (0,4)(0,2)(2,4)(2,3)(3,4) E (ab|a)((bcd|c)(d*)) abcd (0,4)(0,2)(2,4)(2,3)(3,4) E (a*)(b|abc) abc (0,3)(0,0)(0,3) E (a*)(abc|b) abc (0,3)(0,0)(0,3) E ((a*)(b|abc))(c*) abc 
(0,3)(0,3)(0,0)(0,3)(3,3) E ((a*)(abc|b))(c*) abc (0,3)(0,3)(0,0)(0,3)(3,3) E (a*)((b|abc)(c*)) abc (0,3)(0,1)(1,3)(1,2)(2,3) E (a*)((abc|b)(c*)) abc (0,3)(0,1)(1,3)(1,2)(2,3) E (a*)(b|abc) abc (0,3)(0,0)(0,3) E (a*)(abc|b) abc (0,3)(0,0)(0,3) E ((a*)(b|abc))(c*) abc (0,3)(0,3)(0,0)(0,3)(3,3) E ((a*)(abc|b))(c*) abc (0,3)(0,3)(0,0)(0,3)(3,3) E (a*)((b|abc)(c*)) abc (0,3)(0,1)(1,3)(1,2)(2,3) E (a*)((abc|b)(c*)) abc (0,3)(0,1)(1,3)(1,2)(2,3) E (a|ab) ab (0,2)(0,2) E (ab|a) ab (0,2)(0,2) E (a|ab)(b*) ab (0,2)(0,2)(2,2) E (ab|a)(b*) ab (0,2)(0,2)(2,2) skytools-3.2.6/lib/test/attregex/data/interpretation.dat0000644000000000000000000000760712166266754020352 0ustar :RE#01:E a+ xaax (1,3) :RE#02:B .\(a*\). xaax (0,4)(1,3) :RE#03:E (a?)((ab)?) ab (0,2)(0,0)(0,2)(0,2) :RE#04:E (a?)((ab)?)(b?) ab (0,2)(0,1)(1,1)(?,?)(1,2) :RE#05:E ((a?)((ab)?))(b?) ab (0,2)(0,2)(0,0)(0,2)(0,2)(2,2) :RE#06:E (a?)(((ab)?)(b?)) ab (0,2)(0,1)(1,2)(1,1)(?,?)(1,2) :RE#07:E (.?) x (0,1)(0,1) :RE#08:E (.?){1} x (0,1)(0,1) :RE#09:E (.?)(.?) x (0,1)(0,1)(1,1) :RE#10:E (.?){2} x (0,1)(1,1) :RE#11:E (.?)* x (0,1)(0,1) :RE#12:E (.?.?) xxx (0,2)(0,2) :RE#13:E (.?.?){1} xxx (0,2)(0,2) :RE#14:E (.?.?)(.?.?) xxx (0,3)(0,2)(2,3) :RE#15:E (.?.?){2} xxx (0,3)(2,3) :RE#16:E (.?.?)(.?.?)(.?.?) xxx (0,3)(0,2)(2,3)(3,3) :RE#17:E (.?.?){3} xxx (0,3)(3,3) :RE#18:E (.?.?)* xxx (0,3)(2,3) :RE#19:E a?((ab)?)(b?) ab (0,2)(1,1)(?,?)(1,2) :RE#20:E (a?)((ab)?)b? ab (0,2)(0,1)(1,1)(?,?) :RE#21:E a?((ab)?)b? ab (0,2)(1,1)(?,?) :RE#22:E (a*){2} xxxxx (0,0)(0,0) :RE#23:E (ab?)(b?a) aba (0,3)(0,2)(2,3) :RE#24:E (a|ab)(ba|a) aba (0,3)(0,2)(2,3) :RE#25:E (a|ab|ba) aba (0,2)(0,2) :RE#26:E (a|ab|ba)(a|ab|ba) aba (0,3)(0,2)(2,3) :RE#27:E (a|ab|ba)* aba (0,3)(2,3) :RE#28:E (aba|a*b) ababa (0,3)(0,3) :RE#29:E (aba|a*b)(aba|a*b) ababa (0,5)(0,2)(2,5) :RE#30:E (aba|a*b)* ababa (0,5)(2,5) :RE#31:E (aba|ab|a) ababa (0,3)(0,3) :RE#32:E (aba|ab|a)(aba|ab|a) ababa (0,5)(0,2)(2,5) :RE#33:E (aba|ab|a)* ababa (0,5)(2,5) :RE#34:E (a(b)?) 
aba (0,2)(0,2)(1,2) :RE#35:E (a(b)?)(a(b)?) aba (0,3)(0,2)(1,2)(2,3)(?,?) :RE#36:E (a(b)?)+ aba (0,3)(2,3)(?,?) :RE#37:E (.*)(.*) xx (0,2)(0,2)(2,2) :RE#38:E .*(.*) xx (0,2)(2,2) :RE#39:E (a.*z|b.*y) azbazby (0,5)(0,5) :RE#40:E (a.*z|b.*y)(a.*z|b.*y) azbazby (0,7)(0,5)(5,7) :RE#41:E (a.*z|b.*y)* azbazby (0,7)(5,7) :RE#42:E (.|..)(.*) ab (0,2)(0,2)(2,2) :RE#43:E ((..)*(...)*) xxx (0,3)(0,3)(?,?)(0,3) :RE#44:E ((..)*(...)*)((..)*(...)*) xxx (0,3)(0,3)(?,?)(0,3)(3,3)(?,?) :RE#45:E ((..)*(...)*)* xxx (0,3)(0,3)(?,?)(0,3) :RE#46:B \(a\{0,1\}\)*b\1 ab (0,2)(1,1) :RE#47:B \(a*\)*b\1 ab (0,2)(1,1) :RE#48:B \(a*\)b\1* ab (0,2)(0,1) :RE#49:B \(a*\)*b\1* ab (0,2)(1,1) :RE#50:B \(a\{0,1\}\)*b\(\1\) ab (0,2)(1,1)(2,2) :RE#51:B \(a*\)*b\(\1\) ab (0,2)(1,1)(2,2) :RE#52:B \(a*\)b\(\1\)* ab (0,2)(0,1)(?,?) :RE#53:B \(a*\)*b\(\1\)* ab (0,2)(1,1)(2,2) :RE#54:B \(a\{0,1\}\)*b\1 aba (0,3)(0,1) :RE#55:B \(a*\)*b\1 aba (0,3)(0,1) :RE#56:B \(a*\)b\1* aba (0,3)(0,1) :RE#57:B \(a*\)*b\1* aba (0,3)(0,1) :RE#58:B \(a*\)*b\(\1\)* aba (0,3)(0,1)(2,3) :RE#59:B \(a\{0,1\}\)*b\1 abaa (0,3)(0,1) :RE#60:B \(a*\)*b\1 abaa (0,3)(0,1) :RE#61:B \(a*\)b\1* abaa (0,4)(0,1) :RE#62:B \(a*\)*b\1* abaa (0,4)(0,1) :RE#63:B \(a*\)*b\(\1\)* abaa (0,4)(0,1)(3,4) :RE#64:B \(a\{0,1\}\)*b\1 aab (0,3)(2,2) :RE#65:B \(a*\)*b\1 aab (0,3)(2,2) :RE#66:B \(a*\)b\1* aab (0,3)(0,2) :RE#67:B \(a*\)*b\1* aab (0,3)(2,2) :RE#68:B \(a*\)*b\(\1\)* aab (0,3)(2,2)(3,3) :RE#69:B \(a\{0,1\}\)*b\1 aaba (0,4)(1,2) :RE#70:B \(a*\)*b\1 aaba (0,4)(1,2) :RE#71:B \(a*\)b\1* aaba (0,3)(0,2) :RE#72:B \(a*\)*b\1* aaba (0,4)(1,2) :RE#73:B \(a*\)*b\(\1\)* aaba (0,4)(1,2)(3,4) :RE#74:B \(a\{0,1\}\)*b\1 aabaa (0,4)(1,2) :RE#75:B \(a*\)*b\1 aabaa (0,5)(0,2) :RE#76:B \(a*\)b\1* aabaa (0,5)(0,2) :RE#77:B \(a*\)*b\1* aabaa (0,5)(0,2) :RE#78:B \(a*\)*b\(\1\)* aabaa (0,5)(0,2)(3,5) :RE#79:B \(x\)*a\1 a NOMATCH :RE#80:B \(x\)*a\1* a (0,1)(?,?) :RE#81:B \(x\)*a\(\1\) a NOMATCH :RE#82:B \(x\)*a\(\1\)* a (0,1)(?,?)(?,?) 
:RE#83:E (aa(b(b))?)+ aabbaa (0,6)(4,6)(?,?)(?,?) :RE#84:E (a(b)?)+ aba (0,3)(2,3)(?,?) :RE#85:E ([ab]+)([bc]+)([cd]*) abcd (0,4)(0,2)(2,3)(3,4) :RE#86:B \([ab]*\)\([bc]*\)\([cd]*\)\1 abcdaa (0,5)(0,1)(1,3)(3,4) :RE#87:B \([ab]*\)\([bc]*\)\([cd]*\)\1 abcdab (0,6)(0,2)(2,3)(3,4) :RE#88:B \([ab]*\)\([bc]*\)\([cd]*\)\1* abcdaa (0,6)(0,1)(1,3)(3,4) :RE#89:B \([ab]*\)\([bc]*\)\([cd]*\)\1* abcdab (0,6)(0,2)(2,3)(3,4) :RE#90:E ^(A([^B]*))?(B(.*))? Aa (0,2)(0,2)(1,2) :RE#91:E ^(A([^B]*))?(B(.*))? Bb (0,2)(?,?)(?,?)(0,2)(1,2) :RE#92:B .*\([AB]\).*\1 ABA (0,3)(0,1) :RE#93:B$ [^A]*A \nA (0,2) skytools-3.2.6/lib/test/attregex/data/leftassoc.dat0000644000000000000000000000114712166266754017257 0ustar NOTE left-assoc:pass-all right-assoc:pass-none : 2002-04-29 E (a|ab)(c|bcd)(d*) abcd (0,4)(0,1)(1,4)(4,4) E (a|ab)(bcd|c)(d*) abcd (0,4)(0,1)(1,4)(4,4) E (ab|a)(c|bcd)(d*) abcd (0,4)(0,1)(1,4)(4,4) E (ab|a)(bcd|c)(d*) abcd (0,4)(0,1)(1,4)(4,4) E (a*)(b|abc)(c*) abc (0,3)(0,0)(0,3)(3,3) E (a*)(abc|b)(c*) abc (0,3)(0,0)(0,3)(3,3) E (a*)(b|abc)(c*) abc (0,3)(0,0)(0,3)(3,3) E (a*)(abc|b)(c*) abc (0,3)(0,0)(0,3)(3,3) E (a|ab)(c|bcd)(d|.*) abcd (0,4)(0,1)(1,4)(4,4) E (a|ab)(bcd|c)(d|.*) abcd (0,4)(0,1)(1,4)(4,4) E (ab|a)(c|bcd)(d|.*) abcd (0,4)(0,1)(1,4)(4,4) E (ab|a)(bcd|c)(d|.*) abcd (0,4)(0,1)(1,4)(4,4) skytools-3.2.6/lib/test/attregex/data/rightassoc.dat0000644000000000000000000000114712166266754017442 0ustar NOTE left-assoc:pass-none right-assoc:pass-all : 2002-04-29 E (a|ab)(c|bcd)(d*) abcd (0,4)(0,2)(2,3)(3,4) E (a|ab)(bcd|c)(d*) abcd (0,4)(0,2)(2,3)(3,4) E (ab|a)(c|bcd)(d*) abcd (0,4)(0,2)(2,3)(3,4) E (ab|a)(bcd|c)(d*) abcd (0,4)(0,2)(2,3)(3,4) E (a*)(b|abc)(c*) abc (0,3)(0,1)(1,2)(2,3) E (a*)(abc|b)(c*) abc (0,3)(0,1)(1,2)(2,3) E (a*)(b|abc)(c*) abc (0,3)(0,1)(1,2)(2,3) E (a*)(abc|b)(c*) abc (0,3)(0,1)(1,2)(2,3) E (a|ab)(c|bcd)(d|.*) abcd (0,4)(0,2)(2,3)(3,4) E (a|ab)(bcd|c)(d|.*) abcd (0,4)(0,2)(2,3)(3,4) E (ab|a)(c|bcd)(d|.*) abcd (0,4)(0,2)(2,3)(3,4) E (ab|a)(bcd|c)(d|.*) 
abcd (0,4)(0,2)(2,3)(3,4) skytools-3.2.6/lib/test/attregex/run.sh0000755000000000000000000000073212166266754015033 0ustar #! /bin/sh rxtest=./testregex.libc rxtest=./testregex.usual tests="basic.dat categorize.dat nullsubexpr.dat" tests="$tests rightassoc.dat" #tests="$tests leftassoc.dat" tests="$tests forcedassoc.dat" tests="$tests repetition.dat" tests="$tests interpretation.dat" for t in $tests; do printf "%-20s" "$t" #$rxtest < data/$t | grep -vE '(NOTE|Research)' $rxtest < data/$t | tail -n +4 | grep -vE 'haskell|mimi|NOTE' done #$rxtest < data/categorize.dat | tail -n +4 skytools-3.2.6/lib/test/attregex/testregex.c0000644000000000000000000014671712166266754016064 0ustar #pragma prototyped noticed /* * regex(3) test harness * * build: cc -o testregex testregex.c * help: testregex --man * note: REG_* features are detected by #ifdef; if REG_* are enums * then supply #define REG_foo REG_foo for each enum REG_foo * * Glenn Fowler * AT&T Research * * PLEASE: publish your tests so everyone can benefit * * The following license covers testregex.c and all associated test data. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of THIS SOFTWARE FILE (the "Software"), to deal in the Software * without restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, and/or sell copies of the * Software, and to permit persons to whom the Software is furnished to do * so, subject to the following disclaimer: * * THIS SOFTWARE IS PROVIDED BY AT&T ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL AT&T BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ static const char id[] = "\n@(#)$Id: testregex (AT&T Research) 2010-06-10 $\0\n"; #if _PACKAGE_ast #include #else #include #endif #include #include #include #include #include #include #ifdef __STDC__ #include #include #endif #define getline(x) xgetline(x) #include #ifdef USUAL #include #else #include #endif #ifndef RE_DUP_MAX #define RE_DUP_MAX 32767 #endif #if !_PACKAGE_ast #undef REG_DISCIPLINE #endif #ifndef REG_DELIMITED #undef _REG_subcomp #endif #define TEST_ARE 0x00000001 #define TEST_BRE 0x00000002 #define TEST_ERE 0x00000004 #define TEST_KRE 0x00000008 #define TEST_LRE 0x00000010 #define TEST_SRE 0x00000020 #define TEST_EXPAND 0x00000100 #define TEST_LENIENT 0x00000200 #define TEST_QUERY 0x00000400 #define TEST_SUB 0x00000800 #define TEST_UNSPECIFIED 0x00001000 #define TEST_VERIFY 0x00002000 #define TEST_AND 0x00004000 #define TEST_OR 0x00008000 #define TEST_DELIMIT 0x00010000 #define TEST_OK 0x00020000 #define TEST_SAME 0x00040000 #define TEST_ACTUAL 0x00100000 #define TEST_BASELINE 0x00200000 #define TEST_FAIL 0x00400000 #define TEST_PASS 0x00800000 #define TEST_SUMMARY 0x01000000 #define TEST_IGNORE_ERROR 0x02000000 #define TEST_IGNORE_OVER 0x04000000 #define TEST_IGNORE_POSITION 0x08000000 #define TEST_CATCH 0x10000000 #define TEST_VERBOSE 0x20000000 #define TEST_DECOMP 0x40000000 #define TEST_GLOBAL (TEST_ACTUAL|TEST_AND|TEST_BASELINE|TEST_CATCH|TEST_FAIL|TEST_IGNORE_ERROR|TEST_IGNORE_OVER|TEST_IGNORE_POSITION|TEST_OR|TEST_PASS|TEST_SUMMARY|TEST_VERBOSE) #ifdef 
REG_DISCIPLINE #include typedef struct Disc_s { regdisc_t disc; int ordinal; Sfio_t* sp; } Disc_t; static void* compf(const regex_t* re, const char* xstr, size_t xlen, regdisc_t* disc) { Disc_t* dp = (Disc_t*)disc; return (void*)((char*)0 + ++dp->ordinal); } static int execf(const regex_t* re, void* data, const char* xstr, size_t xlen, const char* sstr, size_t slen, char** snxt, regdisc_t* disc) { Disc_t* dp = (Disc_t*)disc; sfprintf(dp->sp, "{%-.*s}(%lu:%d)", xlen, xstr, (char*)data - (char*)0, slen); return atoi(xstr); } static void* resizef(void* handle, void* data, size_t size) { if (!size) return 0; return stkalloc((Sfio_t*)handle, size); } #endif #ifndef NiL #ifdef __STDC__ #define NiL 0 #else #define NiL (char*)0 #endif #endif #define H(x) do{if(html)fprintf(stderr,x);}while(0) #define T(x) fprintf(stderr,x) static void help(int html) { H("\n"); H("\n"); H("\n"); H("testregex man document\n"); H("\n"); H("\n"); H("
\n");
T("NAME\n");
T("  testregex - regex(3) test harness\n");
T("\n");
T("SYNOPSIS\n");
T("  testregex [ options ]\n");
T("\n");
T("DESCRIPTION\n");
T("  testregex reads regex(3) test specifications, one per line, from the\n");
T("  standard input and writes one output line for each failed test. A\n");
T("  summary line is written after all tests are done. Each successful\n");
T("  test is run again with REG_NOSUB. Unsupported features are noted\n");
T("  before the first test, and tests requiring these features are\n");
T("  silently ignored.\n");
T("\n");
T("OPTIONS\n");
T("  -c	catch signals and non-terminating calls\n");
T("  -e	ignore error return mismatches\n");
T("  -h	list help on standard error\n");
T("  -n	do not repeat successful tests with regnexec()\n");
T("  -o	ignore match[] overrun errors\n");
T("  -p	ignore negative position mismatches\n");
T("  -s	use stack instead of malloc\n");
T("  -x	do not repeat successful tests with REG_NOSUB\n");
T("  -v	list each test line\n");
T("  -A	list failed test lines with actual answers\n");
T("  -B	list all test lines with actual answers\n");
T("  -F	list failed test lines\n");
T("  -P	list passed test lines\n");
T("  -S	output one summary line\n");
T("\n");
T("INPUT FORMAT\n");
T("  Input lines may be blank, a comment beginning with #, or a test\n");
T("  specification. A specification is five fields separated by one\n");
T("  or more tabs. NULL denotes the empty string and NIL denotes the\n");
T("  0 pointer.\n");
T("\n");
T("  Field 1: the regex(3) flags to apply, one character per REG_feature\n");
T("  flag. The test is skipped if REG_feature is not supported by the\n");
T("  implementation. If the first character is not [BEASKLP] then the\n");
T("  specification is a global control line. One or more of [BEASKLP] may be\n");
T("  specified; the test will be repeated for each mode.\n");
T("\n");
T("    B 	basic			BRE	(grep, ed, sed)\n");
T("    E 	REG_EXTENDED		ERE	(egrep)\n");
T("    A	REG_AUGMENTED		ARE	(egrep with negation)\n");
T("    S	REG_SHELL		SRE	(sh glob)\n");
T("    K	REG_SHELL|REG_AUGMENTED	KRE	(ksh glob)\n");
T("    L	REG_LITERAL		LRE	(fgrep)\n");
T("\n");
T("    a	REG_LEFT|REG_RIGHT	implicit ^...$\n");
T("    b	REG_NOTBOL		lhs does not match ^\n");
T("    c	REG_COMMENT		ignore space and #...\\n\n");
T("    d	REG_SHELL_DOT		explicit leading . match\n");
T("    e	REG_NOTEOL		rhs does not match $\n");
T("    f	REG_MULTIPLE		multiple \\n separated patterns\n");
T("    g	FNM_LEADING_DIR		testfnmatch only -- match until /\n");
T("    h	REG_MULTIREF		multiple digit backref\n");
T("    i	REG_ICASE		ignore case\n");
T("    j	REG_SPAN		. matches \\n\n");
T("    k	REG_ESCAPE		\\ to ecape [...] delimiter\n");
T("    l	REG_LEFT		implicit ^...\n");
T("    m	REG_MINIMAL		minimal match\n");
T("    n	REG_NEWLINE		explicit \\n match\n");
T("    o	REG_ENCLOSED		(|&) magic inside [@|&](...)\n");
T("    p	REG_SHELL_PATH		explicit / match\n");
T("    q	REG_DELIMITED		delimited pattern\n");
T("    r	REG_RIGHT		implicit ...$\n");
T("    s	REG_SHELL_ESCAPED	\\ not special\n");
T("    t	REG_MUSTDELIM		all delimiters must be specified\n");
T("    u	standard unspecified behavior -- errors not counted\n");
T("    v	REG_CLASS_ESCAPE	\\ special inside [...]\n");
T("    w	REG_NOSUB		no subexpression match array\n");
T("    x	REG_LENIENT		let some errors slide\n");
T("    y	REG_LEFT		regexec() implicit ^...\n");
T("    z	REG_NULL		NULL subexpressions ok\n");
T("    $	                        expand C \\c escapes in fields 2 and 3\n");
T("    /	                        field 2 is a regsubcomp() expression\n");
T("    =	                        field 3 is a regdecomp() expression\n");
T("\n");
T("  Field 1 control lines:\n");
T("\n");
T("    C		set LC_COLLATE and LC_CTYPE to locale in field 2\n");
T("\n");
T("    ?test ...	output field 5 if passed and != EXPECTED, silent otherwise\n");
T("    &test ...	output field 5 if current and previous passed\n");
T("    |test ...	output field 5 if current passed and previous failed\n");
T("    ; ...	output field 2 if previous failed\n");
T("    {test ...	skip if failed until }\n");
T("    }		end of skip\n");
T("\n");
T("    : comment		comment copied as output NOTE\n");
T("    :comment:test	:comment: ignored\n");
T("    N[OTE] comment	comment copied as output NOTE\n");
T("    T[EST] comment	comment\n");
T("\n");
T("    number		use number for nmatch (20 by default)\n");
T("\n");
T("  Field 2: the regular expression pattern; SAME uses the pattern from\n");
T("    the previous specification. RE_DUP_MAX inside {...} expands to the\n");
T("    value from .\n");
T("\n");
T("  Field 3: the string to match. X...{RE_DUP_MAX} expands to RE_DUP_MAX\n");
T("    copies of X.\n");
T("\n");
T("  Field 4: the test outcome. This is either one of the posix error\n");
T("    codes (with REG_ omitted) or the match array, a list of (m,n)\n");
T("    entries with m and n being first and last+1 positions in the\n");
T("    field 3 string, or NULL if REG_NOSUB is in effect and success\n");
T("    is expected. BADPAT is acceptable in place of any regcomp(3)\n");
T("    error code. The match[] array is initialized to (-2,-2) before\n");
T("    each test. All array elements from 0 to nmatch-1 must be specified\n");
T("    in the outcome. Unspecified endpoints (offset -1) are denoted by ?.\n");
T("    Unset endpoints (offset -2) are denoted by X. {x}(o:n) denotes a\n");
T("    matched (?{...}) expression, where x is the text enclosed by {...},\n");
T("    o is the expression ordinal counting from 1, and n is the length of\n");
T("    the unmatched portion of the subject string. If x starts with a\n");
T("    number then that is the return value of re_execf(), otherwise 0 is\n");
T("    returned. RE_DUP_MAX[-+]N expands to the  value -+N.\n");
T("\n");
T("  Field 5: optional comment appended to the report.\n");
T("\n");
T("CAVEAT\n");
T("    If a regex implementation misbehaves with memory then all bets are off.\n");
T("\n");
T("CONTRIBUTORS\n");
T("  Glenn Fowler    gsf@research.att.com        (ksh strmatch, regex extensions)\n");
T("  David Korn      dgk@research.att.com        (ksh glob matcher)\n");
T("  Doug McIlroy    mcilroy@dartmouth.edu       (ast regex/testre in C++)\n");
T("  Tom Lord        lord@regexps.com            (rx tests)\n");
T("  Henry Spencer   henry@zoo.toronto.edu       (original public regex)\n");
T("  Andrew Hume     andrew@research.att.com     (gre tests)\n");
T("  John Maddock    John_Maddock@compuserve.com (regex++ tests)\n");
T("  Philip Hazel    ph10@cam.ac.uk              (pcre tests)\n");
T("  Ville Laurikari vl@iki.fi                   (libtre tests)\n");
H("
\n"); H("\n"); H("\n"); } #ifndef elementsof #define elementsof(x) (sizeof(x)/sizeof(x[0])) #endif #ifndef streq #define streq(a,b) (*(a)==*(b)&&!strcmp(a,b)) #endif #define HUNG 2 #define NOTEST (~0) #ifndef REG_TEST_DEFAULT #define REG_TEST_DEFAULT 0 #endif #ifndef REG_EXEC_DEFAULT #define REG_EXEC_DEFAULT 0 #endif static const char* unsupported[] = { "BASIC", #ifndef REG_EXTENDED "EXTENDED", #endif #ifndef REG_AUGMENTED "AUGMENTED", #endif #ifndef REG_SHELL "SHELL", #endif #ifndef REG_CLASS_ESCAPE "CLASS_ESCAPE", #endif #ifndef REG_COMMENT "COMMENT", #endif #ifndef REG_DELIMITED "DELIMITED", #endif #ifndef REG_DISCIPLINE "DISCIPLINE", #endif #ifndef REG_ESCAPE "ESCAPE", #endif #ifndef REG_ICASE "ICASE", #endif #ifndef REG_LEFT "LEFT", #endif #ifndef REG_LENIENT "LENIENT", #endif #ifndef REG_LITERAL "LITERAL", #endif #ifndef REG_MINIMAL "MINIMAL", #endif #ifndef REG_MULTIPLE "MULTIPLE", #endif #ifndef REG_MULTIREF "MULTIREF", #endif #ifndef REG_MUSTDELIM "MUSTDELIM", #endif #ifndef REG_NEWLINE "NEWLINE", #endif #ifndef REG_NOTBOL "NOTBOL", #endif #ifndef REG_NOTEOL "NOTEOL", #endif #ifndef REG_NULL "NULL", #endif #ifndef REG_RIGHT "RIGHT", #endif #ifndef REG_SHELL_DOT "SHELL_DOT", #endif #ifndef REG_SHELL_ESCAPED "SHELL_ESCAPED", #endif #ifndef REG_SHELL_GROUP "SHELL_GROUP", #endif #ifndef REG_SHELL_PATH "SHELL_PATH", #endif #ifndef REG_SPAN "SPAN", #endif #if REG_NOSUB & REG_TEST_DEFAULT "SUBMATCH", #endif #if !_REG_nexec "regnexec", #endif #if !_REG_subcomp "regsubcomp", #endif #if !_REG_decomp "redecomp", #endif 0 }; #ifndef REG_CLASS_ESCAPE #define REG_CLASS_ESCAPE NOTEST #endif #ifndef REG_COMMENT #define REG_COMMENT NOTEST #endif #ifndef REG_DELIMITED #define REG_DELIMITED NOTEST #endif #ifndef REG_ESCAPE #define REG_ESCAPE NOTEST #endif #ifndef REG_ICASE #define REG_ICASE NOTEST #endif #ifndef REG_LEFT #define REG_LEFT NOTEST #endif #ifndef REG_LENIENT #define REG_LENIENT 0 #endif #ifndef REG_MINIMAL #define REG_MINIMAL NOTEST #endif #ifndef REG_MULTIPLE 
#define REG_MULTIPLE NOTEST #endif #ifndef REG_MULTIREF #define REG_MULTIREF NOTEST #endif #ifndef REG_MUSTDELIM #define REG_MUSTDELIM NOTEST #endif #ifndef REG_NEWLINE #define REG_NEWLINE NOTEST #endif #ifndef REG_NOTBOL #define REG_NOTBOL NOTEST #endif #ifndef REG_NOTEOL #define REG_NOTEOL NOTEST #endif #ifndef REG_NULL #define REG_NULL NOTEST #endif #ifndef REG_RIGHT #define REG_RIGHT NOTEST #endif #ifndef REG_SHELL_DOT #define REG_SHELL_DOT NOTEST #endif #ifndef REG_SHELL_ESCAPED #define REG_SHELL_ESCAPED NOTEST #endif #ifndef REG_SHELL_GROUP #define REG_SHELL_GROUP NOTEST #endif #ifndef REG_SHELL_PATH #define REG_SHELL_PATH NOTEST #endif #ifndef REG_SPAN #define REG_SPAN NOTEST #endif #define REG_UNKNOWN (-1) #ifndef REG_ENEWLINE #define REG_ENEWLINE (REG_UNKNOWN-1) #endif #ifndef REG_ENULL #ifndef REG_EMPTY #define REG_ENULL (REG_UNKNOWN-2) #else #define REG_ENULL REG_EMPTY #endif #endif #ifndef REG_ECOUNT #define REG_ECOUNT (REG_UNKNOWN-3) #endif #ifndef REG_BADESC #define REG_BADESC (REG_UNKNOWN-4) #endif #ifndef REG_EMEM #define REG_EMEM (REG_UNKNOWN-5) #endif #ifndef REG_EHUNG #define REG_EHUNG (REG_UNKNOWN-6) #endif #ifndef REG_EBUS #define REG_EBUS (REG_UNKNOWN-7) #endif #ifndef REG_EFAULT #define REG_EFAULT (REG_UNKNOWN-8) #endif #ifndef REG_EFLAGS #define REG_EFLAGS (REG_UNKNOWN-9) #endif #ifndef REG_EDELIM #define REG_EDELIM (REG_UNKNOWN-9) #endif static const struct { int code; char* name; } codes[] = { REG_UNKNOWN, "UNKNOWN", REG_NOMATCH, "NOMATCH", REG_BADPAT, "BADPAT", REG_ECOLLATE, "ECOLLATE", REG_ECTYPE, "ECTYPE", REG_EESCAPE, "EESCAPE", REG_ESUBREG, "ESUBREG", REG_EBRACK, "EBRACK", REG_EPAREN, "EPAREN", REG_EBRACE, "EBRACE", REG_BADBR, "BADBR", REG_ERANGE, "ERANGE", REG_ESPACE, "ESPACE", REG_BADRPT, "BADRPT", REG_ENEWLINE, "ENEWLINE", REG_ENULL, "ENULL", REG_ECOUNT, "ECOUNT", REG_BADESC, "BADESC", REG_EMEM, "EMEM", REG_EHUNG, "EHUNG", REG_EBUS, "EBUS", REG_EFAULT, "EFAULT", REG_EFLAGS, "EFLAGS", REG_EDELIM, "EDELIM", }; static struct { 
regmatch_t NOMATCH; int errors; int extracted; int ignored; int lineno; int passed; int signals; int unspecified; int verify; int warnings; char* file; char* stack; char* which; jmp_buf gotcha; #ifdef REG_DISCIPLINE Disc_t disc; #endif } state; static void quote(char* s, int len, unsigned long test) { unsigned char* u = (unsigned char*)s; unsigned char* e; int c; #ifdef MB_CUR_MAX int w; #endif if (!u) printf("NIL"); else if (!*u && len <= 1) printf("NULL"); else if (test & TEST_EXPAND) { if (len < 0) len = strlen((char*)u); e = u + len; if (test & TEST_DELIMIT) printf("\""); while (u < e) switch (c = *u++) { case '\\': printf("\\\\"); break; case '"': if (test & TEST_DELIMIT) printf("\\\""); else printf("\""); break; case '\a': printf("\\a"); break; case '\b': printf("\\b"); break; case 033: printf("\\e"); break; case '\f': printf("\\f"); break; case '\n': printf("\\n"); break; case '\r': printf("\\r"); break; case '\t': printf("\\t"); break; case '\v': printf("\\v"); break; default: #ifdef MB_CUR_MAX s = (char*)u - 1; if ((w = mblen(s, (char*)e - s)) > 1) { u += w - 1; fwrite(s, 1, w, stdout); } else #endif if (!iscntrl(c) && isprint(c)) putchar(c); else printf("\\x%02x", c); break; } if (test & TEST_DELIMIT) printf("\""); } else printf("%s", s); } static void report(char* comment, char* fun, char* re, char* s, int len, char* msg, int flags, unsigned long test) { if (state.file) printf("%s:", state.file); printf("%d:", state.lineno); if (re) { printf(" "); quote(re, -1, test|TEST_DELIMIT); if (s) { printf(" versus "); quote(s, len, test|TEST_DELIMIT); } } if (test & TEST_UNSPECIFIED) { state.unspecified++; printf(" unspecified behavior"); } else state.errors++; if (state.which) printf(" %s", state.which); if (flags & REG_NOSUB) printf(" NOSUB"); if (fun) printf(" %s", fun); if (comment[strlen(comment)-1] == '\n') printf(" %s", comment); else { printf(" %s: ", comment); if (msg) printf("%s: ", msg); } } static void error(regex_t* preg, int code) { char* msg; char 
buf[256]; switch (code) { case REG_EBUS: msg = "bus error"; break; case REG_EFAULT: msg = "memory fault"; break; case REG_EHUNG: msg = "did not terminate"; break; default: regerror(code, preg, msg = buf, sizeof buf); break; } printf("%s\n", msg); } static void bad(char* comment, char* re, char* s, int len, unsigned long test) { printf("bad test case "); report(comment, NiL, re, s, len, NiL, 0, test); exit(1); } static int escape(char* s) { char* b; char* t; char* q; char* e; int c; for (b = t = s; *t = *s; s++, t++) if (*s == '\\') switch (*++s) { case '\\': break; case 'a': *t = '\a'; break; case 'b': *t = '\b'; break; case 'c': if (*t = *++s) *t &= 037; else s--; break; case 'e': case 'E': *t = 033; break; case 'f': *t = '\f'; break; case 'n': *t = '\n'; break; case 'r': *t = '\r'; break; case 's': *t = ' '; break; case 't': *t = '\t'; break; case 'v': *t = '\v'; break; case 'u': case 'x': c = 0; q = c == 'u' ? (s + 5) : (char*)0; e = s + 1; while (!e || !q || s < q) { switch (*++s) { case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': c = (c << 4) + *s - 'a' + 10; continue; case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': c = (c << 4) + *s - 'A' + 10; continue; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': c = (c << 4) + *s - '0'; continue; case '{': case '[': if (s != e) { s--; break; } e = 0; continue; case '}': case ']': if (e) s--; break; default: s--; break; } break; } *t = c; break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': c = *s - '0'; q = s + 2; while (s < q) { switch (*++s) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': c = (c << 3) + *s - '0'; break; default: q = --s; break; } } *t = c; break; default: *(s + 1) = 0; bad("invalid C \\ escape\n", s - 1, NiL, 0, 0); } return t - b; } static void matchoffprint(int off) { switch (off) { case -2: printf("X"); break; case -1: printf("?"); break; default: 
printf("%d", off); break; } } static void matchprint(regmatch_t* match, int nmatch, int nsub, char* ans, unsigned long test) { int i; for (; nmatch > nsub + 1; nmatch--) if ((match[nmatch-1].rm_so != -1 || match[nmatch-1].rm_eo != -1) && (!(test & TEST_IGNORE_POSITION) || match[nmatch-1].rm_so >= 0 && match[nmatch-1].rm_eo >= 0)) break; for (i = 0; i < nmatch; i++) { printf("("); matchoffprint(match[i].rm_so); printf(","); matchoffprint(match[i].rm_eo); printf(")"); } if (!(test & (TEST_ACTUAL|TEST_BASELINE))) { if (ans) printf(" expected: %s", ans); printf("\n"); } } static int matchcheck(regmatch_t* match, int nmatch, int nsub, char* ans, char* re, char* s, int len, int flags, unsigned long test) { char* p; int i; int m; int n; if (streq(ans, "OK")) return test & (TEST_BASELINE|TEST_PASS|TEST_VERIFY); for (i = 0, p = ans; i < nmatch && *p; i++) { if (*p == '{') { #ifdef REG_DISCIPLINE char* x; if (!(x = sfstruse(state.disc.sp))) bad("out of space [discipline string]\n", NiL, NiL, 0, 0); if (strcmp(p, x)) { if (test & (TEST_ACTUAL|TEST_BASELINE|TEST_FAIL|TEST_PASS|TEST_QUERY|TEST_SUMMARY|TEST_VERIFY)) return 0; report("callout failed", NiL, re, s, len, NiL, flags, test); quote(p, -1, test); printf(" expected, "); quote(x, -1, test); printf(" returned\n"); } #endif break; } if (*p++ != '(') bad("improper answer\n", re, s, -1, test); if (*p == '?') { m = -1; p++; } else if (*p == 'R' && !memcmp(p, "RE_DUP_MAX", 10)) { m = RE_DUP_MAX; p += 10; if (*p == '+' || *p == '-') m += strtol(p, &p, 10); } else m = strtol(p, &p, 10); if (*p++ != ',') bad("improper answer\n", re, s, -1, test); if (*p == '?') { n = -1; p++; } else if (*p == 'R' && !memcmp(p, "RE_DUP_MAX", 10)) { n = RE_DUP_MAX; p += 10; if (*p == '+' || *p == '-') n += strtol(p, &p, 10); } else n = strtol(p, &p, 10); if (*p++ != ')') bad("improper answer\n", re, s, -1, test); if (m!=match[i].rm_so || n!=match[i].rm_eo) { if (!(test & 
(TEST_ACTUAL|TEST_BASELINE|TEST_FAIL|TEST_PASS|TEST_QUERY|TEST_SUMMARY|TEST_VERIFY))) { report("failed: match was", NiL, re, s, len, NiL, flags, test); matchprint(match, nmatch, nsub, ans, test); } return 0; } } for (; i < nmatch; i++) { if (match[i].rm_so!=-1 || match[i].rm_eo!=-1) { if (!(test & (TEST_ACTUAL|TEST_BASELINE|TEST_FAIL|TEST_PASS|TEST_QUERY|TEST_VERIFY))) { if ((test & TEST_IGNORE_POSITION) && (match[i].rm_so<0 || match[i].rm_eo<0)) { state.ignored++; return 0; } if (!(test & TEST_SUMMARY)) { report("failed: match was", NiL, re, s, len, NiL, flags, test); matchprint(match, nmatch, nsub, ans, test); } } return 0; } } if (!(test & TEST_IGNORE_OVER) && match[nmatch].rm_so != state.NOMATCH.rm_so) { if (!(test & (TEST_ACTUAL|TEST_BASELINE|TEST_FAIL|TEST_PASS|TEST_QUERY|TEST_SUMMARY|TEST_VERIFY))) { report("failed: overran match array", NiL, re, s, len, NiL, flags, test); matchprint(match, nmatch + 1, nsub, NiL, test); } return 0; } return 1; } static void sigunblock(int s) { #ifdef SIG_SETMASK int op; sigset_t mask; sigemptyset(&mask); if (s) { sigaddset(&mask, s); op = SIG_UNBLOCK; } else op = SIG_SETMASK; sigprocmask(op, &mask, NiL); #else #ifdef sigmask sigsetmask(s ? 
(sigsetmask(0L) & ~sigmask(s)) : 0L); #endif #endif } static void gotcha(int sig) { int ret; signal(sig, gotcha); alarm(0); state.signals++; switch (sig) { case SIGALRM: ret = REG_EHUNG; break; case SIGBUS: ret = REG_EBUS; break; default: ret = REG_EFAULT; break; } sigunblock(sig); longjmp(state.gotcha, ret); } static char* getline(FILE* fp) { static char buf[32 * 1024]; register char* s = buf; register char* e = &buf[sizeof(buf)]; register char* b; for (;;) { if (!(b = fgets(s, e - s, fp))) return 0; state.lineno++; s += strlen(s); if (s == b || *--s != '\n' || s == b || *(s - 1) != '\\') { *s = 0; break; } s--; } return buf; } static unsigned long note(unsigned long level, char* msg, unsigned long skip, unsigned long test) { if (!(test & (TEST_ACTUAL|TEST_BASELINE|TEST_FAIL|TEST_PASS|TEST_SUMMARY)) && !skip) { printf("NOTE\t"); if (msg) printf("%s: ", msg); printf("skipping lines %d", state.lineno); } return skip | level; } #define TABS(n) &ts[7-((n)&7)] static char ts[] = "\t\t\t\t\t\t\t"; static unsigned long extract(int* tabs, char* spec, char* re, char* s, char* ans, char* msg, char* accept, regmatch_t* match, int nmatch, int nsub, unsigned long skip, unsigned long level, unsigned long test) { if (test & (TEST_ACTUAL|TEST_BASELINE|TEST_FAIL|TEST_OK|TEST_PASS|TEST_SUMMARY)) { state.extracted = 1; if (test & TEST_OK) { state.passed++; if ((test & TEST_VERIFY) && !(test & (TEST_ACTUAL|TEST_BASELINE|TEST_FAIL|TEST_PASS|TEST_SUMMARY))) { if (msg && strcmp(msg, "EXPECTED")) printf("NOTE\t%s\n", msg); return skip; } test &= ~(TEST_PASS|TEST_QUERY); } if (test & (TEST_QUERY|TEST_VERIFY)) { if (test & TEST_BASELINE) test &= ~(TEST_BASELINE|TEST_PASS); else test |= TEST_PASS; skip |= level; } if (!(test & TEST_OK)) { if (test & TEST_UNSPECIFIED) state.unspecified++; else state.errors++; } if (test & (TEST_PASS|TEST_SUMMARY)) return skip; test &= ~TEST_DELIMIT; printf("%s%s", spec, TABS(*tabs++)); if ((test & (TEST_BASELINE|TEST_SAME)) == (TEST_BASELINE|TEST_SAME)) 
printf("SAME"); else quote(re, -1, test); printf("%s", TABS(*tabs++)); quote(s, -1, test); printf("%s", TABS(*tabs++)); if (!(test & (TEST_ACTUAL|TEST_BASELINE)) || !accept && !match) printf("%s", ans); else if (accept) printf("%s", accept); else matchprint(match, nmatch, nsub, NiL, test); if (msg) printf("%s%s", TABS(*tabs++), msg); putchar('\n'); } else if (test & TEST_QUERY) skip = note(level, msg, skip, test); else if (test & TEST_VERIFY) state.extracted = 1; return skip; } static int catchfree(regex_t* preg, int flags, int* tabs, char* spec, char* re, char* s, char* ans, char* msg, char* accept, regmatch_t* match, int nmatch, int nsub, unsigned long skip, unsigned long level, unsigned long test) { int eret; if (!(test & TEST_CATCH)) { regfree(preg); eret = 0; } else if (!(eret = setjmp(state.gotcha))) { alarm(HUNG); regfree(preg); alarm(0); } else if (test & (TEST_ACTUAL|TEST_BASELINE|TEST_FAIL|TEST_PASS|TEST_QUERY|TEST_SUMMARY|TEST_VERIFY)) extract(tabs, spec, re, s, ans, msg, NiL, NiL, 0, 0, skip, level, test); else { report("failed", "regfree", re, NiL, -1, msg, flags, test); error(preg, eret); } return eret; } static char* expand(char* os, char* ot) { char* s = os; char* t; int n = 0; int r; long m; for (;;) { switch (*s++) { case 0: break; case '{': n++; continue; case '}': n--; continue; case 'R': if (n == 1 && !memcmp(s, "E_DUP_MAX", 9)) { s--; for (t = ot; os < s; *t++ = *os++); r = ((t - ot) >= 5 && t[-1] == '{' && t[-2] == '.' && t[-3] == '.' && t[-4] == '.') ? 
t[-5] : 0; os = ot; m = RE_DUP_MAX; if (*(s += 10) == '+' || *s == '-') m += strtol(s, &s, 10); if (r) { t -= 5; while (m-- > 0) *t++ = r; while (*s && *s++ != '}'); } else t += snprintf(t, 32, "%ld", m); while (*t = *s++) t++; break; } continue; default: continue; } break; } return os; } int main(int argc, char** argv) { int flags; int cflags; int eflags; int nmatch; int nexec; int nstr; int cret; int eret; int nsub; int i; int j; int expected; int got; int locale; int subunitlen; int testno; unsigned long level; unsigned long skip; char* p; char* line; char* spec; char* re; char* s; char* ans; char* msg; char* fun; char* ppat; char* subunit; char* version; char* field[6]; char* delim[6]; FILE* fp; int tabs[6]; char unit[64]; regmatch_t match[100]; regex_t preg; static char pat[32 * 1024]; static char patbuf[32 * 1024]; static char strbuf[32 * 1024]; int nonosub = REG_NOSUB == 0; int nonexec = 0; unsigned long test = 0; static char* filter[] = { "-", 0 }; state.NOMATCH.rm_so = state.NOMATCH.rm_eo = -2; p = unit; version = (char*)id + 10; while (p < &unit[sizeof(unit)-1] && (*p = *version++) && !isspace(*p)) p++; *p = 0; while ((p = *++argv) && *p == '-') for (;;) { switch (*++p) { case 0: break; case 'c': test |= TEST_CATCH; continue; case 'e': test |= TEST_IGNORE_ERROR; continue; case 'h': case '?': help(0); return 2; case '-': help(p[1] == 'h'); return 2; case 'n': nonexec = 1; continue; case 'o': test |= TEST_IGNORE_OVER; continue; case 'p': test |= TEST_IGNORE_POSITION; continue; case 's': #ifdef REG_DISCIPLINE if (!(state.stack = stkalloc(stkstd, 0))) fprintf(stderr, "%s: out of space [stack]", unit); state.disc.disc.re_resizef = resizef; state.disc.disc.re_resizehandle = (void*)stkstd; #endif continue; case 'x': nonosub = 1; continue; case 'v': test |= TEST_VERBOSE; continue; case 'A': test |= TEST_ACTUAL; continue; case 'B': test |= TEST_BASELINE; continue; case 'F': test |= TEST_FAIL; continue; case 'P': test |= TEST_PASS; continue; case 'S': test |= 
TEST_SUMMARY; continue; default: fprintf(stderr, "%s: %c: invalid option\n", unit, *p); return 2; } break; } if (!*argv) argv = filter; locale = 0; while (state.file = *argv++) { if (streq(state.file, "-") || streq(state.file, "/dev/stdin") || streq(state.file, "/dev/fd/0")) { state.file = 0; fp = stdin; } else if (!(fp = fopen(state.file, "r"))) { fprintf(stderr, "%s: %s: cannot read\n", unit, state.file); return 2; } testno = state.errors = state.ignored = state.lineno = state.passed = state.signals = state.unspecified = state.warnings = 0; skip = 0; level = 1; if (!(test & (TEST_ACTUAL|TEST_BASELINE|TEST_FAIL|TEST_PASS|TEST_SUMMARY))) { printf("TEST\t%s ", unit); if (s = state.file) { subunit = p = 0; for (;;) { switch (*s++) { case 0: break; case '/': subunit = s; continue; case '.': p = s - 1; continue; default: continue; } break; } if (!subunit) subunit = state.file; if (p < subunit) p = s - 1; subunitlen = p - subunit; printf("%-.*s ", subunitlen, subunit); } else subunit = 0; for (s = version; *s && (*s != ' ' || *(s + 1) != '$'); s++) putchar(*s); if (test & TEST_CATCH) printf(", catch"); if (test & TEST_IGNORE_ERROR) printf(", ignore error code mismatches"); if (test & TEST_IGNORE_POSITION) printf(", ignore negative position mismatches"); #ifdef REG_DISCIPLINE if (state.stack) printf(", stack"); #endif if (test & TEST_VERBOSE) printf(", verbose"); printf("\n"); #ifdef REG_VERSIONID if (regerror(REG_VERSIONID, NiL, pat, sizeof(pat)) > 0) s = pat; else #endif #ifdef REG_TEST_VERSION s = REG_TEST_VERSION; #else s = "regex"; #endif printf("NOTE\t%s\n", s); if (elementsof(unsupported) > 1) { #if (REG_TEST_DEFAULT & (REG_AUGMENTED|REG_EXTENDED|REG_SHELL)) || !defined(REG_EXTENDED) i = 0; #else i = REG_EXTENDED != 0; #endif for (got = 0; i < elementsof(unsupported) - 1; i++) { if (!got) { got = 1; printf("NOTE\tunsupported: %s", unsupported[i]); } else printf(",%s", unsupported[i]); } if (got) printf("\n"); } } #ifdef REG_DISCIPLINE state.disc.disc.re_version = 
REG_VERSION; state.disc.disc.re_compf = compf; state.disc.disc.re_execf = execf; if (!(state.disc.sp = sfstropen())) bad("out of space [discipline string stream]\n", NiL, NiL, 0, 0); preg.re_disc = &state.disc.disc; #endif if (test & TEST_CATCH) { signal(SIGALRM, gotcha); signal(SIGBUS, gotcha); signal(SIGSEGV, gotcha); } while (p = getline(fp)) { /* parse: */ line = p; if (*p == ':' && !isspace(*(p + 1))) { while (*++p && *p != ':'); if (!*p++) { if (test & TEST_BASELINE) printf("%s\n", line); continue; } } while (isspace(*p)) p++; if (*p == 0 || *p == '#' || *p == 'T') { if (test & TEST_BASELINE) printf("%s\n", line); continue; } if (*p == ':' || *p == 'N') { if (test & TEST_BASELINE) printf("%s\n", line); else if (!(test & (TEST_ACTUAL|TEST_FAIL|TEST_PASS|TEST_SUMMARY))) { while (*++p && !isspace(*p)); while (isspace(*p)) p++; printf("NOTE %s\n", p); } continue; } j = 0; i = 0; field[i++] = p; for (;;) { switch (*p++) { case 0: p--; j = 0; goto checkfield; case '\t': *(delim[i] = p - 1) = 0; j = 1; checkfield: s = field[i - 1]; if (streq(s, "NIL")) field[i - 1] = 0; else if (streq(s, "NULL")) *s = 0; while (*p == '\t') { p++; j++; } tabs[i - 1] = j; if (!*p) break; if (i >= elementsof(field)) bad("too many fields\n", NiL, NiL, 0, 0); field[i++] = p; /*FALLTHROUGH*/ default: continue; } break; } if (!(spec = field[0])) bad("NIL spec\n", NiL, NiL, 0, 0); /* interpret: */ cflags = REG_TEST_DEFAULT; eflags = REG_EXEC_DEFAULT; test &= TEST_GLOBAL; state.extracted = 0; nmatch = 20; nsub = -1; for (p = spec; *p; p++) { if (isdigit(*p)) { nmatch = strtol(p, &p, 10); if (nmatch >= elementsof(match)) bad("nmatch must be < 100\n", NiL, NiL, 0, 0); p--; continue; } switch (*p) { case 'A': test |= TEST_ARE; continue; case 'B': test |= TEST_BRE; continue; case 'C': if (!(test & TEST_QUERY) && !(skip & level)) bad("locale must be nested\n", NiL, NiL, 0, 0); test &= ~TEST_QUERY; if (locale) bad("locale nesting not supported\n", NiL, NiL, 0, 0); if (i != 2) bad("locale field 
expected\n", NiL, NiL, 0, 0); if (!(skip & level)) { #if defined(LC_COLLATE) && defined(LC_CTYPE) s = field[1]; if (!s || streq(s, "POSIX")) s = "C"; if ((ans = setlocale(LC_COLLATE, s)) && streq(ans, "POSIX")) ans = "C"; if (!ans || !streq(ans, s) && streq(s, "C")) ans = 0; else if ((ans = setlocale(LC_CTYPE, s)) && streq(ans, "POSIX")) ans = "C"; if (!ans || !streq(ans, s) && streq(s, "C")) skip = note(level, s, skip, test); else { if (!(test & (TEST_ACTUAL|TEST_BASELINE|TEST_FAIL|TEST_PASS|TEST_SUMMARY))) printf("NOTE \"%s\" locale\n", s); locale = level; } #else skip = note(level, skip, test, "locales not supported"); #endif } cflags = NOTEST; continue; case 'E': test |= TEST_ERE; continue; case 'K': test |= TEST_KRE; continue; case 'L': test |= TEST_LRE; continue; case 'S': test |= TEST_SRE; continue; case 'a': cflags |= REG_LEFT|REG_RIGHT; continue; case 'b': eflags |= REG_NOTBOL; continue; case 'c': cflags |= REG_COMMENT; continue; case 'd': cflags |= REG_SHELL_DOT; continue; case 'e': eflags |= REG_NOTEOL; continue; case 'f': cflags |= REG_MULTIPLE; continue; case 'g': cflags |= NOTEST; continue; case 'h': cflags |= REG_MULTIREF; continue; case 'i': cflags |= REG_ICASE; continue; case 'j': cflags |= REG_SPAN; continue; case 'k': cflags |= REG_ESCAPE; continue; case 'l': cflags |= REG_LEFT; continue; case 'm': cflags |= REG_MINIMAL; continue; case 'n': cflags |= REG_NEWLINE; continue; case 'o': cflags |= REG_SHELL_GROUP; continue; case 'p': cflags |= REG_SHELL_PATH; continue; case 'q': cflags |= REG_DELIMITED; continue; case 'r': cflags |= REG_RIGHT; continue; case 's': cflags |= REG_SHELL_ESCAPED; continue; case 't': cflags |= REG_MUSTDELIM; continue; case 'u': test |= TEST_UNSPECIFIED; continue; case 'v': cflags |= REG_CLASS_ESCAPE; continue; case 'w': cflags |= REG_NOSUB; continue; case 'x': if (REG_LENIENT) cflags |= REG_LENIENT; else test |= TEST_LENIENT; continue; case 'y': eflags |= REG_LEFT; continue; case 'z': cflags |= REG_NULL; continue; case '$': 
test |= TEST_EXPAND; continue; case '/': test |= TEST_SUB; continue; case '=': test |= TEST_DECOMP; continue; case '?': test |= TEST_VERIFY; test &= ~(TEST_AND|TEST_OR); state.verify = state.passed; continue; case '&': test |= TEST_VERIFY|TEST_AND; test &= ~TEST_OR; continue; case '|': test |= TEST_VERIFY|TEST_OR; test &= ~TEST_AND; continue; case ';': test |= TEST_OR; test &= ~TEST_AND; continue; case '{': level <<= 1; if (skip & (level >> 1)) { skip |= level; cflags = NOTEST; } else { skip &= ~level; test |= TEST_QUERY; } continue; case '}': if (level == 1) bad("invalid {...} nesting\n", NiL, NiL, 0, 0); if ((skip & level) && !(skip & (level>>1))) { if (!(test & (TEST_BASELINE|TEST_SUMMARY))) { if (test & (TEST_ACTUAL|TEST_FAIL)) printf("}\n"); else if (!(test & TEST_PASS)) printf("-%d\n", state.lineno); } } #if defined(LC_COLLATE) && defined(LC_CTYPE) else if (locale & level) { locale = 0; if (!(skip & level)) { s = "C"; setlocale(LC_COLLATE, s); setlocale(LC_CTYPE, s); if (!(test & (TEST_ACTUAL|TEST_BASELINE|TEST_FAIL|TEST_PASS|TEST_SUMMARY))) printf("NOTE \"%s\" locale\n", s); else if (test & (TEST_ACTUAL|TEST_BASELINE|TEST_PASS)) printf("}\n"); } else if (test & (TEST_ACTUAL|TEST_BASELINE|TEST_FAIL)) printf("}\n"); } #endif level >>= 1; cflags = NOTEST; continue; default: bad("bad spec\n", spec, NiL, 0, test); break; } break; } if ((cflags|eflags) == NOTEST || (skip & level) && (test & TEST_BASELINE)) { if (test & TEST_BASELINE) { while (i > 1) *delim[--i] = '\t'; printf("%s\n", line); } continue; } if (test & TEST_OR) { if (!(test & TEST_VERIFY)) { test &= ~TEST_OR; if (state.passed == state.verify && i > 1) printf("NOTE\t%s\n", field[1]); continue; } else if (state.passed > state.verify) continue; } else if (test & TEST_AND) { if (state.passed == state.verify) continue; state.passed = state.verify; } if (i < ((test & TEST_DECOMP) ? 
3 : 4)) bad("too few fields\n", NiL, NiL, 0, test); while (i < elementsof(field)) field[i++] = 0; if (re = field[1]) { if (streq(re, "SAME")) { re = ppat; test |= TEST_SAME; } else { if (test & TEST_EXPAND) escape(re); re = expand(re, patbuf); strcpy(ppat = pat, re); } } else ppat = 0; nstr = -1; if (s = field[2]) { s = expand(s, strbuf); if (test & TEST_EXPAND) { nstr = escape(s); #if _REG_nexec if (nstr != strlen(s)) nexec = nstr; #endif } } if (!(ans = field[(test & TEST_DECOMP) ? 2 : 3])) bad("NIL answer\n", NiL, NiL, 0, test); msg = field[4]; fflush(stdout); if (test & TEST_SUB) #if _REG_subcomp cflags |= REG_DELIMITED; #else continue; #endif #if !_REG_decomp if (test & TEST_DECOMP) continue; #endif compile: if (state.extracted || (skip & level)) continue; #if !(REG_TEST_DEFAULT & (REG_AUGMENTED|REG_EXTENDED|REG_SHELL)) #ifdef REG_EXTENDED if (REG_EXTENDED != 0 && (test & TEST_BRE)) #else if (test & TEST_BRE) #endif { test &= ~TEST_BRE; flags = cflags; state.which = "BRE"; } else #endif #ifdef REG_EXTENDED if (test & TEST_ERE) { test &= ~TEST_ERE; flags = cflags | REG_EXTENDED; state.which = "ERE"; } else #endif #ifdef REG_AUGMENTED if (test & TEST_ARE) { test &= ~TEST_ARE; flags = cflags | REG_AUGMENTED; state.which = "ARE"; } else #endif #ifdef REG_LITERAL if (test & TEST_LRE) { test &= ~TEST_LRE; flags = cflags | REG_LITERAL; state.which = "LRE"; } else #endif #ifdef REG_SHELL if (test & TEST_SRE) { test &= ~TEST_SRE; flags = cflags | REG_SHELL; state.which = "SRE"; } else #ifdef REG_AUGMENTED if (test & TEST_KRE) { test &= ~TEST_KRE; flags = cflags | REG_SHELL | REG_AUGMENTED; state.which = "KRE"; } else #endif #endif { if (test & (TEST_BASELINE|TEST_PASS|TEST_VERIFY)) extract(tabs, line, re, s, ans, msg, NiL, NiL, 0, 0, skip, level, test|TEST_OK); continue; } if ((test & (TEST_QUERY|TEST_VERBOSE|TEST_VERIFY)) == TEST_VERBOSE) { printf("test %-3d %s ", state.lineno, state.which); quote(re, -1, test|TEST_DELIMIT); printf(" "); quote(s, nstr, 
test|TEST_DELIMIT); printf("\n"); } nosub: fun = "regcomp"; #if _REG_nexec if (nstr >= 0 && nstr != strlen(s)) nexec = nstr; else #endif nexec = -1; if (state.extracted || (skip & level)) continue; if (!(test & TEST_QUERY)) testno++; #ifdef REG_DISCIPLINE if (state.stack) stkset(stkstd, state.stack, 0); flags |= REG_DISCIPLINE; state.disc.ordinal = 0; sfstrseek(state.disc.sp, 0, SEEK_SET); #endif if (!(test & TEST_CATCH)) cret = regcomp(&preg, re, flags); else if (!(cret = setjmp(state.gotcha))) { alarm(HUNG); cret = regcomp(&preg, re, flags); alarm(0); } #if _REG_subcomp if (!cret && (test & TEST_SUB)) { fun = "regsubcomp"; p = re + preg.re_npat; if (!(test & TEST_CATCH)) cret = regsubcomp(&preg, p, NiL, 0, 0); else if (!(cret = setjmp(state.gotcha))) { alarm(HUNG); cret = regsubcomp(&preg, p, NiL, 0, 0); alarm(0); } if (!cret && *(p += preg.re_npat) && !(preg.re_sub->re_flags & REG_SUB_LAST)) { if (catchfree(&preg, flags, tabs, line, re, s, ans, msg, NiL, NiL, 0, 0, skip, level, test)) continue; cret = REG_EFLAGS; } } #endif #if _REG_decomp if (!cret && (test & TEST_DECOMP)) { char buf[128]; if ((j = nmatch) > sizeof(buf)) j = sizeof(buf); fun = "regdecomp"; p = re + preg.re_npat; if (!(test & TEST_CATCH)) i = regdecomp(&preg, -1, buf, j); else if (!(cret = setjmp(state.gotcha))) { alarm(HUNG); i = regdecomp(&preg, -1, buf, j); alarm(0); } if (!cret) { catchfree(&preg, flags, tabs, line, re, s, ans, msg, NiL, NiL, 0, 0, skip, level, test); if (i > j) { if (i != (strlen(ans) + 1)) { report("failed", fun, re, s, nstr, msg, flags, test); printf(" %d byte buffer supplied, %d byte buffer required\n", j, i); } } else if (strcmp(buf, ans)) { report("failed", fun, re, s, nstr, msg, flags, test); quote(ans, -1, test|TEST_DELIMIT); printf(" expected, "); quote(buf, -1, test|TEST_DELIMIT); printf(" returned\n"); } continue; } } #endif if (!cret) { if (!(flags & REG_NOSUB) && nsub < 0 && *ans == '(') { for (p = ans; *p; p++) if (*p == '(') nsub++; else if (*p == '{') nsub--; 
if (nsub >= 0) { if (test & TEST_IGNORE_OVER) { if (nmatch > nsub) nmatch = nsub + 1; } else if (nsub != preg.re_nsub) { if (nsub > preg.re_nsub) { if (test & (TEST_ACTUAL|TEST_BASELINE|TEST_FAIL|TEST_PASS|TEST_QUERY|TEST_SUMMARY|TEST_VERIFY)) skip = extract(tabs, line, re, s, ans, msg, "OK", NiL, 0, 0, skip, level, test|TEST_DELIMIT); else { report("re_nsub incorrect", fun, re, NiL, -1, msg, flags, test); printf("at least %d expected, %d returned\n", nsub, (int)preg.re_nsub); state.errors++; } } else nsub = preg.re_nsub; } } } if (!(test & (TEST_DECOMP|TEST_SUB)) && *ans && *ans != '(' && !streq(ans, "OK") && !streq(ans, "NOMATCH")) { if (test & (TEST_ACTUAL|TEST_BASELINE|TEST_FAIL|TEST_PASS|TEST_QUERY|TEST_SUMMARY|TEST_VERIFY)) skip = extract(tabs, line, re, s, ans, msg, "OK", NiL, 0, 0, skip, level, test|TEST_DELIMIT); else if (!(test & TEST_LENIENT)) { report("failed", fun, re, NiL, -1, msg, flags, test); printf("%s expected, OK returned\n", ans); } catchfree(&preg, flags, tabs, line, re, s, ans, msg, NiL, NiL, 0, 0, skip, level, test); continue; } } else { if (test & TEST_LENIENT) /* we'll let it go this time */; else if (!*ans || ans[0]=='(' || cret == REG_BADPAT && streq(ans, "NOMATCH")) { got = 0; for (i = 1; i < elementsof(codes); i++) if (cret==codes[i].code) got = i; if (test & (TEST_ACTUAL|TEST_BASELINE|TEST_FAIL|TEST_PASS|TEST_QUERY|TEST_SUMMARY|TEST_VERIFY)) skip = extract(tabs, line, re, s, ans, msg, codes[got].name, NiL, 0, 0, skip, level, test|TEST_DELIMIT); else { report("failed", fun, re, NiL, -1, msg, flags, test); printf("%s returned: ", codes[got].name); error(&preg, cret); } } else { expected = got = 0; for (i = 1; i < elementsof(codes); i++) { if (streq(ans, codes[i].name)) expected = i; if (cret==codes[i].code) got = i; } if (!expected) { if (test & (TEST_ACTUAL|TEST_BASELINE|TEST_FAIL|TEST_PASS|TEST_QUERY|TEST_SUMMARY|TEST_VERIFY)) skip = extract(tabs, line, re, s, ans, msg, codes[got].name, NiL, 0, 0, skip, level, test|TEST_DELIMIT); else 
{ report("failed: invalid error code", NiL, re, NiL, -1, msg, flags, test); printf("%s expected, %s returned\n", ans, codes[got].name); } } else if (cret != codes[expected].code && cret != REG_BADPAT) { if (test & (TEST_ACTUAL|TEST_BASELINE|TEST_FAIL|TEST_PASS|TEST_QUERY|TEST_SUMMARY|TEST_VERIFY)) skip = extract(tabs, line, re, s, ans, msg, codes[got].name, NiL, 0, 0, skip, level, test|TEST_DELIMIT); else if (test & TEST_IGNORE_ERROR) state.ignored++; else { report("should fail and did", fun, re, NiL, -1, msg, flags, test); printf("%s expected, %s returned: ", ans, codes[got].name); state.errors--; state.warnings++; error(&preg, cret); } } } goto compile; } #if _REG_nexec execute: if (nexec >= 0) fun = "regnexec"; else #endif fun = "regexec"; for (i = 0; i < elementsof(match); i++) match[i] = state.NOMATCH; #if _REG_nexec if (nexec >= 0) { eret = regnexec(&preg, s, nexec, nmatch, match, eflags); s[nexec] = 0; } else #endif { if (!(test & TEST_CATCH)) eret = regexec(&preg, s, nmatch, match, eflags); else if (!(eret = setjmp(state.gotcha))) { alarm(HUNG); eret = regexec(&preg, s, nmatch, match, eflags); alarm(0); } } #if _REG_subcomp if ((test & TEST_SUB) && !eret) { fun = "regsubexec"; if (!(test & TEST_CATCH)) eret = regsubexec(&preg, s, nmatch, match); else if (!(eret = setjmp(state.gotcha))) { alarm(HUNG); eret = regsubexec(&preg, s, nmatch, match); alarm(0); } } #endif if (flags & REG_NOSUB) { if (eret) { if (eret != REG_NOMATCH || !streq(ans, "NOMATCH")) { if (test & (TEST_ACTUAL|TEST_BASELINE|TEST_FAIL|TEST_PASS|TEST_QUERY|TEST_SUMMARY|TEST_VERIFY)) skip = extract(tabs, line, re, s, ans, msg, "NOMATCH", NiL, 0, 0, skip, level, test|TEST_DELIMIT); else { report("REG_NOSUB failed", fun, re, s, nstr, msg, flags, test); error(&preg, eret); } } } else if (streq(ans, "NOMATCH")) { if (test & (TEST_ACTUAL|TEST_BASELINE|TEST_FAIL|TEST_PASS|TEST_QUERY|TEST_SUMMARY|TEST_VERIFY)) skip = extract(tabs, line, re, s, ans, msg, NiL, match, nmatch, nsub, skip, level, 
test|TEST_DELIMIT); else { report("should fail and didn't", fun, re, s, nstr, msg, flags, test); error(&preg, eret); } } } else if (eret) { if (eret != REG_NOMATCH || !streq(ans, "NOMATCH")) { if (test & (TEST_ACTUAL|TEST_BASELINE|TEST_FAIL|TEST_PASS|TEST_QUERY|TEST_SUMMARY|TEST_VERIFY)) skip = extract(tabs, line, re, s, ans, msg, "NOMATCH", NiL, 0, nsub, skip, level, test|TEST_DELIMIT); else { report("failed", fun, re, s, nstr, msg, flags, test); if (eret != REG_NOMATCH) error(&preg, eret); else if (*ans) printf("expected: %s\n", ans); else printf("\n"); } } } else if (streq(ans, "NOMATCH")) { if (test & (TEST_ACTUAL|TEST_BASELINE|TEST_FAIL|TEST_PASS|TEST_QUERY|TEST_SUMMARY|TEST_VERIFY)) skip = extract(tabs, line, re, s, ans, msg, NiL, match, nmatch, nsub, skip, level, test|TEST_DELIMIT); else { report("should fail and didn't", fun, re, s, nstr, msg, flags, test); matchprint(match, nmatch, nsub, NiL, test); } } #if _REG_subcomp else if (test & TEST_SUB) { p = preg.re_sub->re_buf; if (strcmp(p, ans)) { report("failed", fun, re, s, nstr, msg, flags, test); quote(ans, -1, test|TEST_DELIMIT); printf(" expected, "); quote(p, -1, test|TEST_DELIMIT); printf(" returned\n"); } } #endif else if (!*ans) { if (match[0].rm_so != state.NOMATCH.rm_so) { if (test & (TEST_ACTUAL|TEST_BASELINE|TEST_FAIL|TEST_PASS|TEST_QUERY|TEST_SUMMARY|TEST_VERIFY)) skip = extract(tabs, line, re, s, ans, msg, NiL, NiL, 0, 0, skip, level, test); else { report("failed: no match but match array assigned", NiL, re, s, nstr, msg, flags, test); matchprint(match, nmatch, nsub, NiL, test); } } } else if (matchcheck(match, nmatch, nsub, ans, re, s, nstr, flags, test)) { #if _REG_nexec if (nexec < 0 && !nonexec) { nexec = nstr >= 0 ? 
nstr : strlen(s); s[nexec] = '\n'; testno++; goto execute; } #endif if (!(test & (TEST_DECOMP|TEST_SUB|TEST_VERIFY)) && !nonosub) { if (catchfree(&preg, flags, tabs, line, re, s, ans, msg, NiL, NiL, 0, 0, skip, level, test)) continue; flags |= REG_NOSUB; goto nosub; } if (test & (TEST_BASELINE|TEST_PASS|TEST_VERIFY)) skip = extract(tabs, line, re, s, ans, msg, NiL, match, nmatch, nsub, skip, level, test|TEST_OK); } else if (test & (TEST_ACTUAL|TEST_BASELINE|TEST_FAIL|TEST_PASS|TEST_QUERY|TEST_SUMMARY|TEST_VERIFY)) skip = extract(tabs, line, re, s, ans, msg, NiL, match, nmatch, nsub, skip, level, test|TEST_DELIMIT); if (catchfree(&preg, flags, tabs, line, re, s, ans, msg, NiL, NiL, 0, 0, skip, level, test)) continue; goto compile; } if (test & TEST_SUMMARY) printf("tests=%-4d errors=%-4d warnings=%-2d ignored=%-2d unspecified=%-2d signals=%d\n", testno, state.errors, state.warnings, state.ignored, state.unspecified, state.signals); else if (!(test & (TEST_ACTUAL|TEST_BASELINE|TEST_FAIL|TEST_PASS))) { printf("TEST\t%s", unit); if (subunit) printf(" %-.*s", subunitlen, subunit); printf(", %d test%s", testno, testno == 1 ? "" : "s"); if (state.ignored) printf(", %d ignored mismatche%s", state.ignored, state.ignored == 1 ? "" : "s"); if (state.warnings) printf(", %d warning%s", state.warnings, state.warnings == 1 ? "" : "s"); if (state.unspecified) printf(", %d unspecified difference%s", state.unspecified, state.unspecified == 1 ? "" : "s"); if (state.signals) printf(", %d signal%s", state.signals, state.signals == 1 ? "" : "s"); printf(", %d error%s\n", state.errors, state.errors == 1 ? 
"" : "s"); } if (fp != stdin) fclose(fp); } return 0; } skytools-3.2.6/lib/test/attregex/Makefile0000644000000000000000000000112612166266754015326 0ustar SUBLOC = test/attregex AM_CPPFLAGS = -I$(top_srcdir) -I$(top_builddir) VPATH = $(top_srcdir) USUAL_DIR = $(top_srcdir) EXTRA_PROGRAMS = testregex.usual testregex.libc testregex_usual_SOURCES = testregex.c testregex_usual_DEFS = -DUSUAL -DUSE_INTERNAL_REGEX testregex_usual_EMBED_LIBUSUAL = 1 testregex_libc_SOURCES = testregex.c EXTRA_DIST = Makefile run.sh \ data/basic.dat \ data/categorize.dat \ data/forcedassoc.dat \ data/interpretation.dat \ data/leftassoc.dat \ data/nullsubexpr.dat \ data/repetition.dat \ data/rightassoc.dat test: $(EXTRA_PROGRAMS) include ../../build.mk skytools-3.2.6/lib/test/test_cfparser.ini0000644000000000000000000000012412166266754015405 0ustar [one] str1 = val1 int = 5 bool = 1 [two] str2 = val2 time1 = 1.5 time2 = 2.5 skytools-3.2.6/lib/test/test_cfparser.c0000644000000000000000000000624212166266754015057 0ustar #include #include #include #include #include "test_common.h" struct Config1 { const char *str1; const char *def1; int int1; int bool1; }; struct Config2 { const char *str2; const char *def2; double time_double; usec_t time_usec; }; static struct Config1 cf1; static struct Config2 cf2; static void cleanup(void) { free(cf1.str1); free(cf1.def1); free(cf2.str2); free(cf2.def2); memset(&cf1, 0, sizeof(cf1)); memset(&cf2, 0, sizeof(cf2)); } static const struct CfKey keys1 [] = { CF_ABS("str1", CF_STR, cf1.str1, 0, NULL), CF_ABS("def1", CF_STR, cf1.def1, 0, NULL), CF_ABS("int", CF_INT, cf1.int1, 0, NULL), CF_ABS("bool", CF_BOOL, cf1.bool1, 0, NULL), { NULL }, }; static const struct CfKey keys2 [] = { CF_ABS("str2", CF_STR, cf2.str2, 0, NULL), CF_ABS("def2", CF_STR, cf2.def2, 0, "somedefault"), CF_ABS("time1", CF_TIME_USEC, cf2.time_usec, 0, NULL), CF_ABS("time2", CF_TIME_DOUBLE, cf2.time_double, 0, NULL), { NULL }, }; static const struct CfSect sects [] = { { "one", keys1 }, { "two", 
keys2 }, { NULL }, }; static struct CfContext cfdesc1 = { sects, NULL }; static void test_abs(void *ptr) { char buf[128]; int_check(1, cf_load_file(&cfdesc1, "test_cfparser.ini")); str_check(cf1.str1, "val1"); tt_assert(cf1.def1 == NULL); str_check(cf2.str2, "val2"); str_check(cf2.def2, "somedefault"); tt_assert(cf2.time_usec == (3 * USEC / 2)); tt_assert(cf2.time_double == 2.5); str_check("val1", cf_get(&cfdesc1, "one", "str1", buf, sizeof(buf))); int_check(1, cf_set(&cfdesc1, "one", "str1", "val2")); str_check("val2", cf_get(&cfdesc1, "one", "str1", buf, sizeof(buf))); end: cleanup(); } /* * relative addressing. */ #define CF_REL_BASE struct Config1 static const struct CfKey rkeys1 [] = { CF_REL("str1", CF_STR, str1, 0, NULL), CF_REL("def1", CF_STR, def1, 0, NULL), CF_REL("int", CF_INT, int1, 0, NULL), CF_REL("bool", CF_BOOL, bool1, 0, NULL), { NULL }, }; #undef CF_REL_BASE #define CF_REL_BASE struct Config2 static const struct CfKey rkeys2 [] = { CF_REL("str2", CF_STR, str2, 0, NULL), CF_REL("def2", CF_STR, def2, 0, "somedefault"), CF_REL("time1", CF_TIME_USEC, time_usec, 0, NULL), CF_REL("time2", CF_TIME_DOUBLE, time_double, 0, NULL), { NULL }, }; #undef CF_REL_BASE static void *get_two(void *top_arg, const char *sect_name) { return &cf2; } static const struct CfSect rsects [] = { { "one", rkeys1 }, { "two", rkeys2, get_two, }, { NULL }, }; static struct CfContext cfdesc2 = { rsects, &cf1 }; static void test_rel(void *ptr) { char buf[128]; cleanup(); int_check(1, cf_load_file(&cfdesc2, "test_cfparser.ini")); str_check(cf1.str1, "val1"); tt_assert(cf1.def1 == NULL); str_check(cf2.str2, "val2"); str_check(cf2.def2, "somedefault"); tt_assert(cf2.time_usec == (3 * USEC / 2)); tt_assert(cf2.time_double == 2.5); str_check("val1", cf_get(&cfdesc2, "one", "str1", buf, sizeof(buf))); int_check(1, cf_set(&cfdesc2, "one", "str1", "val2")); str_check("val2", cf_get(&cfdesc2, "one", "str1", buf, sizeof(buf))); end: cleanup(); } /* * Describe */ struct testcase_t 
cfparser_tests[] = { { "abs", test_abs }, { "rel", test_rel }, END_OF_TESTCASES }; skytools-3.2.6/lib/test/force_compat.sed0000644000000000000000000000073112166266754015202 0ustar /^#define.*FFS/s,.*,/* & */, /^#define.*FLS/s,.*,/* & */, /^#define.*STRLCPY/s,.*,/* & */, /^#define.*STRLCAT/s,.*,/* & */, /^#define.*BASENAME/s,.*,/* & */, /^#define.*DIRNAME/s,.*,/* & */, /^#define.*REGCOMP/s,.*,/* & */, /^#define.*GETADDRINFO_A/s,.*,/* & */, /^#define.*INET_NTOP/s,.*,/* & */, /^#define.*INET_PTON/s,.*,/* & */, /^#define.*GETOPT/s,.*,/* & */, /^#define.*CTYPE_ON_CHAR/s,.*,/* & */, /^#define.*FNMATCH/s,.*,/* & */, /^#define.*MBSNRTOWCS/s,.*,/* & */, skytools-3.2.6/lib/test/test_fnmatch.c0000644000000000000000000001533212166266754014672 0ustar #include #include #include #include "test_common.h" /* * POSIX syntax. */ static void test_fnmatch_posix(void *p) { /* literal */ int_check(0, fnmatch("", "", 0)); int_check(0, fnmatch("a", "a", 0)); int_check(0, fnmatch("abc", "abc", 0)); int_check(1, fnmatch("", "b", 0)); int_check(1, fnmatch("a", "", 0)); int_check(1, fnmatch("a", "b", 0)); /* single wildcard */ int_check(0, fnmatch("a?", "ax", 0)); int_check(0, fnmatch("??", "ax", 0)); int_check(1, fnmatch("?", "ax", 0)); int_check(1, fnmatch("???", "ax", 0)); /* wildcard */ int_check(0, fnmatch("a*", "ax", 0)); int_check(0, fnmatch("*", "", 0)); int_check(0, fnmatch("*", "qwe", 0)); int_check(0, fnmatch("ab*ab", "abxxab", 0)); int_check(1, fnmatch("ab*ab", "abxxabc", 0)); /* wildcard+ */ int_check(0, fnmatch("ab*ab*", "abxxabc", 0)); int_check(0, fnmatch("ab*ab*c", "abxxabc", 0)); int_check(0, fnmatch("ab*ab*c", "abxxabxc", 0)); int_check(0, fnmatch("??*??", "abxxab", 0)); int_check(0, fnmatch("*??", "abxxab", 0)); int_check(0, fnmatch("??*", "abxxab", 0)); int_check(0, fnmatch("a**c", "abc", 0)); /* classes */ int_check(0, fnmatch("[abc]", "b", 0)); int_check(1, fnmatch("[abc]", "x", 0)); int_check(0, fnmatch("[a-c]", "b", 0)); int_check(1, fnmatch("[a-c]", "x", 0)); 
int_check(0, fnmatch("[b-b]", "b", 0)); int_check(1, fnmatch("[!abc]", "b", 0)); int_check(1, fnmatch("[!a-c]", "b", 0)); int_check(0, fnmatch("[!a-c]", "x", 0)); int_check(0, fnmatch("[*?[][*?[][*?[]", "*?[", 0)); int_check(0, fnmatch("[[:alpha:]][![:alpha:]]", "a9", 0)); int_check(0, fnmatch("[[:alnum:]][![:alnum:]]", "9-", 0)); #ifdef iswblank int_check(0, fnmatch("[[:blank:]][![:blank:]]", " -", 0)); #endif int_check(0, fnmatch("[[:cntrl:]][![:cntrl:]]", "\tx", 0)); int_check(0, fnmatch("[[:digit:]][![:digit:]]", "9a", 0)); int_check(0, fnmatch("[[:graph:]][![:graph:]]", "a\t", 0)); int_check(0, fnmatch("[[:lower:]][![:lower:]]", "aA", 0)); int_check(0, fnmatch("[[:print:]][![:print:]]", "a\n", 0)); int_check(0, fnmatch("[[:punct:]][![:punct:]]", ".x", 0)); int_check(0, fnmatch("[[:space:]][![:space:]]", " x", 0)); int_check(0, fnmatch("[[:upper:]][![:upper:]]", "Ff", 0)); int_check(0, fnmatch("[[:xdigit:]][![:xdigit:]]", "Fx", 0)); int_check(0, fnmatch("[", "[", 0)); int_check(0, fnmatch("[f", "[f", 0)); /* escaping */ int_check(1, fnmatch("\\a\\?", "ax", 0)); int_check(0, fnmatch("\\a\\?", "a?", 0)); int_check(1, fnmatch("\\a\\*", "ax", 0)); int_check(0, fnmatch("\\a\\*", "a*", 0)); int_check(0, fnmatch("\\[a]", "[a]", 0)); int_check(0, fnmatch("\\\\", "\\", 0)); int_check(0, fnmatch("\\$\\'\\\"\\<\\>", "$'\"<>", 0)); int_check(1, fnmatch("a\\", "a", 0)); int_check(1, fnmatch("a\\", "a\\", 0)); int_check(0, fnmatch("a\\", "a\\", FNM_NOESCAPE)); int_check(0, fnmatch("\\[a]", "\\a", FNM_NOESCAPE)); int_check(0, fnmatch("\\*b", "\\aab", FNM_NOESCAPE)); /* FNM_PATHNAME */ int_check(0, fnmatch("ab*c", "ab/c", 0)); int_check(1, fnmatch("ab*c", "ab/c", FNM_PATHNAME)); int_check(1, fnmatch("ab?c", "ab/c", FNM_PATHNAME)); int_check(1, fnmatch("ab[/]c", "ab/c", FNM_PATHNAME)); int_check(0, fnmatch("/*/", "//", FNM_PATHNAME)); int_check(1, fnmatch("a[b/c]d", "a/d", FNM_PATHNAME)); int_check(0, fnmatch("abd", "abd", FNM_PATHNAME)); int_check(1, fnmatch("a[b/c]d", 
"a[b/c]d", FNM_PATHNAME)); /* FNM_PERIOD */ int_check(0, fnmatch(".foo", ".foo", 0)); int_check(0, fnmatch("?foo", ".foo", 0)); int_check(0, fnmatch("[.]foo", ".foo", 0)); int_check(0, fnmatch("[!abc]foo", ".foo", 0)); int_check(0, fnmatch("*foo", ".foo", 0)); int_check(0, fnmatch(".foo", ".foo", FNM_PERIOD)); int_check(1, fnmatch("*foo", ".foo", FNM_PERIOD)); int_check(1, fnmatch("?foo", ".foo", FNM_PERIOD)); int_check(0, fnmatch("*/?foo", "sub/.foo", FNM_PERIOD)); int_check(1, fnmatch("*.foo", ".foo", FNM_PERIOD)); int_check(1, fnmatch("[.]foo", ".foo", FNM_PERIOD)); /* FNM_PATHNAME | FNM_PERIOD */ int_check(1, fnmatch("*/?foo", "sub/.foo", FNM_PERIOD|FNM_PATHNAME)); int_check(1, fnmatch("*/[.]foo", "sub/.foo", FNM_PERIOD|FNM_PATHNAME)); int_check(1, fnmatch("*/*.c", "sub/.foo.c", FNM_PERIOD|FNM_PATHNAME)); int_check(1, fnmatch("*/*", "sub/.foo.c", FNM_PERIOD|FNM_PATHNAME)); int_check(0, fnmatch("*/*.c", "sub/foo..c", FNM_PERIOD|FNM_PATHNAME)); int_check(1, fnmatch("*/*.foo", "sub/.foo", FNM_PERIOD|FNM_PATHNAME)); /* escapes in brackets ~ posix */ int_check(0, fnmatch("[A\\]]", "\\]", FNM_NOESCAPE)); int_check(0, fnmatch("[a\\-x]", "_", FNM_NOESCAPE)); end:; } /* * GNU syntax. 
*/ static void test_fnmatch_gnu(void *p) { /* FNM_CASEFOLD */ int_check(1, fnmatch("aaAA", "AaAa", 0)); int_check(1, fnmatch("[b][b][B][B][a-c][A-C][a-c][A-C]", "bBbBbbBB", 0)); int_check(0, fnmatch("aaAA", "AaAa", FNM_CASEFOLD)); int_check(0, fnmatch("[b][b][B][B][a-c][A-C][a-c][A-C]", "bBbBbbBB", FNM_CASEFOLD)); /* FNM_LEADING_DIR */ int_check(0, fnmatch("a", "a", FNM_LEADING_DIR|FNM_PATHNAME)); int_check(0, fnmatch("a", "a/b", FNM_LEADING_DIR|FNM_PATHNAME)); int_check(0, fnmatch("a/b", "a/b/c/d", FNM_LEADING_DIR|FNM_PATHNAME)); int_check(0, fnmatch("a/*/*", "a/b/c/d", FNM_LEADING_DIR|FNM_PATHNAME)); int_check(0, fnmatch("*", "/a", FNM_LEADING_DIR|FNM_PATHNAME)); /* seems wrong to allow it */ int_check(0, fnmatch("a", "a/b", FNM_LEADING_DIR)); /* escapes in brackets ~ gnu */ int_check(0, fnmatch("[A\\]][A\\]]", "]A", 0)); int_check(1, fnmatch("[a\\-x]", "_", 0)); int_check(0, fnmatch("[\\!x]", "!", 0)); int_check(1, fnmatch("[\\!x]", "\\", 0)); int_check(0, fnmatch("[\\[:alnum:]", ":", 0)); end:; } /* * DoS possibilities. 
*/ static void test_fnmatch_weird(void *p) { char pat[4096]; char str[4096]; int i; memset(pat, 0, sizeof(pat)); memset(str, 0, sizeof(str)); memset(pat, '*', 1500); memset(str, 'a', 1500); int_check(0, fnmatch(pat, str, 0)); pat[10] = 'a'; pat[1200] = 'b'; int_check(0, fnmatch(pat, "ab", 0)); for (i = 0; i < 1200; i++) { char c = 'a' + (i%26); pat[i*2] = c; pat[i*2+1] = '*'; str[i*2] = c; str[i*2+1] = c; } pat[i*2] = 0; str[i*2] = 0; int_check(0, fnmatch(pat, str, 0)); for (i = 0; i < 2000; i++) { pat[i*2] = '*'; pat[i*2+1] = '?'; str[i*2] = 'a'; str[i*2+1] = 'b'; } str[i*2] = 0; pat[i*2] = 0; int_check(0, fnmatch(pat, str, 0)); pat[i*2] = 'a'; pat[i*2 + 1] = 0; int_check(1, fnmatch(pat, str, 0)); pat[i*2] = 'b'; int_check(0, fnmatch(pat, str, 0)); pat[i*2] = '*'; pat[3] = 'x'; str[2000] = 'x'; int_check(0, fnmatch(pat, str, 0)); memset(pat, '?', sizeof(pat)); memset(str, 'x', sizeof(str)); str[4000] = 0; pat[2000] = 0; pat[0] = '*'; int_check(0, fnmatch(pat, str, 0)); end:; } struct testcase_t fnmatch_tests[] = { { "posix", test_fnmatch_posix }, { "gnu", test_fnmatch_gnu }, { "weird", test_fnmatch_weird }, END_OF_TESTCASES }; skytools-3.2.6/lib/test/compile.c0000644000000000000000000000255312166266754013644 0ustar #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static inline bool heap_is_better(const void *a, const void *b) { return 1; } int main(void) { struct AATree aatree; struct CBTree *cbtree; struct md5_ctx md5; char buf[128]; static_assert(sizeof(int) >= 4, "unsupported int size"); aatree_init(&aatree, NULL, NULL); cbtree = cbtree_create(NULL, NULL, NULL, USUAL_ALLOC); cbtree_destroy(cbtree); daemonize(NULL, false); hash_lookup3("foo", 3); if (!event_init()) log_debug("test"); if (!parse_ini_file("foo", NULL, NULL)) log_debug("test"); log_stats("1"); 
file_size("foo"); md5_reset(&md5); strlcpy(buf, "foo", sizeof(buf)); printf("xmalloc: %p\n", xmalloc(128)); if (0) die("0"); return 0; } skytools-3.2.6/lib/test/test_getopt.c0000644000000000000000000000415312166266754014553 0ustar #include #include "test_common.h" #include #include static const char *xgetopt(const char *opts, const struct option *lopts, ...) { static char resbuf[1024]; int i, c, argc = 1; char *argv[100]; va_list ap; char *p = resbuf; resbuf[0] = 'X'; resbuf[1] = 0; argv[0] = "prog"; va_start(ap, lopts); while (1) { argv[argc] = va_arg(ap, char *); if (!argv[argc]) break; argc++; } va_end(ap); opterr = 0; optind = 0; while (1) { if (lopts) c = getopt_long(argc, argv, opts, lopts, NULL); else c = getopt(argc, argv, opts); if (c == -1) break; switch (c) { case '?': return "ERR"; case ':': return "EARG"; case 0: break; default: if (p != resbuf) *p++ = ','; if (optarg) p += sprintf(p, "%c=%s", c, optarg); else p += sprintf(p, "%c", c); } } for (i = optind; i < argc; i++) p += sprintf(p, "|%s", argv[i]); return resbuf; } static void test_getopt(void *_) { str_check(xgetopt("ab:", NULL, "-abFOO", "zzz", NULL), "a,b=FOO|zzz"); str_check(xgetopt("ab:", NULL, "-a", "zzz", "-bFOO", NULL), "a,b=FOO|zzz"); str_check(xgetopt("ab:", NULL, "-b", "FOO", "-", "--", "-a", NULL), "b=FOO|-|-a"); str_check(xgetopt("ab:", NULL, "--foo", NULL), "ERR"); end:; } static void test_getopt_long(void *_) { static int longc; static const char sopts[] = "ab:"; static const struct option lopts[] = { { "longa", no_argument, NULL, 'a'}, { "longb", required_argument, NULL, 'b'}, { "longc", no_argument, &longc, 'C'}, { NULL }, }; str_check(xgetopt(sopts, lopts, "--longa", "--", "--longa", NULL), "a|--longa"); str_check(xgetopt(sopts, lopts, "--longb", "FOO", "ARG", "--longa", NULL), "b=FOO,a|ARG"); str_check(xgetopt(sopts, lopts, "--longb=BAZ", NULL), "b=BAZ"); str_check(xgetopt(sopts, lopts, "--longb", NULL), "ERR"); str_check(xgetopt(sopts, lopts, "--xx", NULL), "ERR"); 
str_check(xgetopt(sopts, lopts, "-", "--longc", "ARG", NULL), "|-|ARG"); tt_assert(longc == 'C'); end:; } struct testcase_t getopt_tests[] = { { "getopt", test_getopt }, { "getopt_long", test_getopt_long }, END_OF_TESTCASES }; skytools-3.2.6/lib/test/test_shlist.c0000644000000000000000000000572212166266754014562 0ustar #include #include "test_common.h" #include struct MyNode { struct SHList node; char val[16]; }; static const char *xval(const struct SHList *elem) { const struct MyNode *n; if (!elem) return NULL; n = container_of(elem, struct MyNode, node); return n->val; } static struct MyNode *new_node(int v) { struct MyNode *n = malloc(sizeof(*n)); if (!n) return NULL; shlist_init(&n->node); snprintf(n->val, sizeof(n->val), "%d", v); return n; } static const char *check_list(const struct SHList *list) { const struct SHList *old, *cur; old = NULL; for (cur = shlist_next(list, list); cur != list; cur = shlist_next(list, cur)) { if (old) { if (shlist_prev(list, cur) != old) return "FAIL 1"; } else { if (shlist_prev(list, cur) != list) return "FAIL 2"; } old = cur; } if (shlist_prev(list, list) != ((old) ? 
old : list)) return "FAIL 3"; return "OK"; } static const char *xshow(const struct SHList *list) { static char res[1024]; struct SHList *el; const char *ck = check_list(list); if (strcmp(ck, "OK") != 0) return ck; res[0] = 0; shlist_for_each(el, list) { if (res[0]) strcat(res, ","); strcat(res, xval(el)); } return res; } static const char *xadd(struct SHList *list, int v) { struct MyNode *n = new_node(v); if (!n) return "FAIL"; shlist_append(list, &n->node); return xshow(list); } static const char *xadd1(struct SHList *list, int v) { struct MyNode *n = new_node(v); if (!n) return "FAIL"; shlist_prepend(list, &n->node); return xshow(list); } static const char *xdel(struct SHList *list, int v) { char buf[32]; struct SHList *el, *tmp; struct MyNode *n; snprintf(buf, sizeof(buf), "%d", v); shlist_for_each_safe(el, list, tmp) { n = container_of(el, struct MyNode, node); if (strcmp(buf, n->val) == 0) { shlist_remove(list, el); free(n); } } if (!check_list(list)) return "FAIL"; return xshow(list); } static void test_shlist(void *p) { struct SHList rlist, *list = &rlist; shlist_init(list); str_check(check_list(list), "OK"); str_check(xadd(list, 2), "2"); str_check(xadd1(list, 1), "1,2"); str_check(xadd(list, 3), "1,2,3"); str_check(xadd(list, 4), "1,2,3,4"); str_check(check_list(list), "OK"); { struct MyNode *n; str_check(xadd1(list, 0), "0,1,2,3,4"); n = shlist_pop_type(list, struct MyNode, node); str_check(n->val, "0"); free(n); } { struct MyNode *n; struct SHList *el; str_check(xadd1(list, 0), "0,1,2,3,4"); el = shlist_pop(list); n = container_of(el, struct MyNode, node); str_check(n->val, "0"); free(n); } str_check(xval(shlist_first(list)), "1"); str_check(xval(shlist_last(list)), "4"); int_check(shlist_empty(list), 0); str_check(xdel(list, 2), "1,3,4"); str_check(xdel(list, 1), "3,4"); str_check(xdel(list, 4), "3"); str_check(xdel(list, 3), ""); str_check(check_list(list), "OK"); int_check(shlist_empty(list), 1); end:; } struct testcase_t shlist_tests[] = { { "basic", 
test_shlist }, END_OF_TESTCASES }; skytools-3.2.6/lib/autogen.sh0000755000000000000000000000004312166266754013062 0ustar #! /bin/sh ./mk/std-autogen.sh . skytools-3.2.6/lib/configure.ac0000644000000000000000000000125312166266754013353 0ustar dnl Process this file with autoconf to produce a configure script. AC_INIT([libusual], [0.1], [https://libusual.github.com]) AC_CONFIG_SRCDIR(usual/base.h) AC_CONFIG_HEADER(usual/config.h) AC_PREREQ([2.59]) AC_USUAL_INIT AC_USUAL_PROGRAM_CHECK LT_INIT AC_USUAL_HEADER_CHECK AC_USUAL_TYPE_CHECK AC_USUAL_FUNCTION_CHECK AC_USUAL_CASSERT AC_USUAL_WERROR AC_USUAL_DEBUG AC_USUAL_UREGEX AC_USUAL_LIBEVENT(opt) AC_USUAL_GETADDRINFO_A dnl search for common libraries AC_SEARCH_LIBS(clock_gettime, rt) AC_SEARCH_LIBS(getsockname, socket) AC_SEARCH_LIBS(gethostbyname, nsl) AC_SEARCH_LIBS(hstrerror, resolv) dnl Output findings AC_OUTPUT([config.mak mk/libusual.pc build.mk]) skytools-3.2.6/lib/find_modules.sh0000755000000000000000000000167112166266754014100 0ustar #! /bin/sh set -e top="$1" # sanity check test -n "$top" || { echo "usage: $0 USUAL_DIR SRC ..." >&2 exit 1 } test -f "$top/usual/base.h" || { echo "usage: $0 USUAL_DIR SRC ..." 
>&2 exit 1 } shift test -n "$1" || exit 0 test -n "$AWK" || AWK=awk # return uniq module names, exclude already found ones grep_usual() { excl='excl["config"]=1' for m in $m_done; do excl="$excl;excl[\"$m\"]=1" done prog=' BEGIN { '"$excl"' } /^#include[ \t]*[<"]usual\// { p1 = index($0, "/"); p2 = index($0, "."); m = substr($0, p1+1, p2-p1-1); if (!excl[m]) print m; }' $AWK "$prog" "$@" | sort -u } # return module filename globs make_pats() { for m in "$@"; do echo "$top/usual/$m*.[ch]" done } # loop over grep until all mods are found m_done="" m_tocheck=`grep_usual "$@"` while test -n "$m_tocheck"; do m_done="$m_done $m_tocheck" pats=`make_pats $m_tocheck` m_tocheck=`grep_usual $pats` done # done echo $m_done skytools-3.2.6/lib/mk/0000755000000000000000000000000012166266754011473 5ustar skytools-3.2.6/lib/mk/amext-cxx.mk0000644000000000000000000000167012166266754013746 0ustar # # Support for C++ language # # - extensions: .cc, .cpp, cxx # - CXX, CXXFLAGS # - AM_CXXFLAGS, _CXXFLAGS # # autoconfigurable values ifneq ($(filter-out @%,@CXX@),) CXX = @CXX@ CXXFLAGS = @CXXFLAGS@ endif CXX ?= c++ CXXFLAGS ?= -O -g # fixme: add warning flags to CXXFLAGS CXXFLAGS += $(WFLAGS) # helper variables CXXLD ?= $(CXX) CXXCOMPILE ?= $(CXX) $(AM_DEFS) $(DEFS) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLINK ?= $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ # full compile command define AM_LANG_CXX_COMPILE $(E) "CXX" $< $(Q) $(LTCOMPILE) $(CXXCOMPILE) $(OBJDEPS) -c -o $@ $< endef # full link command define AM_LANG_CXX_LINK $(E) "CXXLD" $@ $(Q) $(LTLINK) $(CXXLINK) $^ $(AM_LIBS) $(LIBS) $(AM_LT_RPATH) endef # source file extensions for c++ AM_LANG_CXX_SRCEXTS = .cc .cpp cxx # register per-target variable AM_TARGET_VARIABLES += CXXFLAGS # register new language AM_LANGUAGES += CXX skytools-3.2.6/lib/mk/install-sh0000755000000000000000000003253712166266754013511 0ustar #!/bin/sh # install - install a program, script, or datafile 
scriptversion=2009-04-28.21; # UTC # This originates from X11R5 (mit/util/scripts/install.sh), which was # later released in X11R6 (xc/config/util/install.sh) with the # following copyright and license. # # Copyright (C) 1994 X Consortium # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN # AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC- # TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # # Except as contained in this notice, the name of the X Consortium shall not # be used in advertising or otherwise to promote the sale, use or other deal- # ings in this Software without prior written authorization from the X Consor- # tium. # # # FSF changes to this file are in the public domain. # # Calling this script install-sh is preferred over install.sh, to prevent # `make' implicit rules from creating a file called install from it # when there is no Makefile. # # This script is compatible with the BSD install script, but was written # from scratch. nl=' ' IFS=" "" $nl" # set DOITPROG to echo to test this script # Don't use :- since 4.3BSD and earlier shells don't like it. 
doit=${DOITPROG-} if test -z "$doit"; then doit_exec=exec else doit_exec=$doit fi # Put in absolute file names if you don't have them in your path; # or use environment vars. chgrpprog=${CHGRPPROG-chgrp} chmodprog=${CHMODPROG-chmod} chownprog=${CHOWNPROG-chown} cmpprog=${CMPPROG-cmp} cpprog=${CPPROG-cp} mkdirprog=${MKDIRPROG-mkdir} mvprog=${MVPROG-mv} rmprog=${RMPROG-rm} stripprog=${STRIPPROG-strip} posix_glob='?' initialize_posix_glob=' test "$posix_glob" != "?" || { if (set -f) 2>/dev/null; then posix_glob= else posix_glob=: fi } ' posix_mkdir= # Desired mode of installed file. mode=0755 chgrpcmd= chmodcmd=$chmodprog chowncmd= mvcmd=$mvprog rmcmd="$rmprog -f" stripcmd= src= dst= dir_arg= dst_arg= copy_on_change=false no_target_directory= usage="\ Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE or: $0 [OPTION]... SRCFILES... DIRECTORY or: $0 [OPTION]... -t DIRECTORY SRCFILES... or: $0 [OPTION]... -d DIRECTORIES... In the 1st form, copy SRCFILE to DSTFILE. In the 2nd and 3rd, copy all SRCFILES to DIRECTORY. In the 4th, create DIRECTORIES. Options: --help display this help and exit. --version display version info and exit. -c (ignored) -C install only if different (preserve the last data modification time) -d create directories instead of installing files. -g GROUP $chgrpprog installed files to GROUP. -m MODE $chmodprog installed files to MODE. -o USER $chownprog installed files to USER. -s $stripprog installed files. -t DIRECTORY install into DIRECTORY. -T report an error if DSTFILE is a directory. 
Environment variables override the default commands: CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG RMPROG STRIPPROG " while test $# -ne 0; do case $1 in -c) ;; -C) copy_on_change=true;; -d) dir_arg=true;; -g) chgrpcmd="$chgrpprog $2" shift;; --help) echo "$usage"; exit $?;; -m) mode=$2 case $mode in *' '* | *' '* | *' '* | *'*'* | *'?'* | *'['*) echo "$0: invalid mode: $mode" >&2 exit 1;; esac shift;; -o) chowncmd="$chownprog $2" shift;; -s) stripcmd=$stripprog;; -t) dst_arg=$2 shift;; -T) no_target_directory=true;; --version) echo "$0 $scriptversion"; exit $?;; --) shift break;; -*) echo "$0: invalid option: $1" >&2 exit 1;; *) break;; esac shift done if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then # When -d is used, all remaining arguments are directories to create. # When -t is used, the destination is already specified. # Otherwise, the last argument is the destination. Remove it from $@. for arg do if test -n "$dst_arg"; then # $@ is not empty: it contains at least $arg. set fnord "$@" "$dst_arg" shift # fnord fi shift # arg dst_arg=$arg done fi if test $# -eq 0; then if test -z "$dir_arg"; then echo "$0: no input file specified." >&2 exit 1 fi # It's OK to call `install-sh -d' without argument. # This can happen when creating conditional directories. exit 0 fi if test -z "$dir_arg"; then trap '(exit $?); exit' 1 2 13 15 # Set umask so as not to create temps with too-generous modes. # However, 'strip' requires both read and write access to temps. case $mode in # Optimize common cases. *644) cp_umask=133;; *755) cp_umask=22;; *[0-7]) if test -z "$stripcmd"; then u_plus_rw= else u_plus_rw='% 200' fi cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;; *) if test -z "$stripcmd"; then u_plus_rw= else u_plus_rw=,u+rw fi cp_umask=$mode$u_plus_rw;; esac fi for src do # Protect names starting with `-'. case $src in -*) src=./$src;; esac if test -n "$dir_arg"; then dst=$src dstdir=$dst test -d "$dstdir" dstdir_status=$? 
else # Waiting for this to be detected by the "$cpprog $src $dsttmp" command # might cause directories to be created, which would be especially bad # if $src (and thus $dsttmp) contains '*'. if test ! -f "$src" && test ! -d "$src"; then echo "$0: $src does not exist." >&2 exit 1 fi if test -z "$dst_arg"; then echo "$0: no destination specified." >&2 exit 1 fi dst=$dst_arg # Protect names starting with `-'. case $dst in -*) dst=./$dst;; esac # If destination is a directory, append the input filename; won't work # if double slashes aren't ignored. if test -d "$dst"; then if test -n "$no_target_directory"; then echo "$0: $dst_arg: Is a directory" >&2 exit 1 fi dstdir=$dst dst=$dstdir/`basename "$src"` dstdir_status=0 else # Prefer dirname, but fall back on a substitute if dirname fails. dstdir=` (dirname "$dst") 2>/dev/null || expr X"$dst" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$dst" : 'X\(//\)[^/]' \| \ X"$dst" : 'X\(//\)$' \| \ X"$dst" : 'X\(/\)' \| . 2>/dev/null || echo X"$dst" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q' ` test -d "$dstdir" dstdir_status=$? fi fi obsolete_mkdir_used=false if test $dstdir_status != 0; then case $posix_mkdir in '') # Create intermediate dirs using mode 755 as modified by the umask. # This is like FreeBSD 'install' as of 1997-10-28. umask=`umask` case $stripcmd.$umask in # Optimize common cases. *[2367][2367]) mkdir_umask=$umask;; .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;; *[0-7]) mkdir_umask=`expr $umask + 22 \ - $umask % 100 % 40 + $umask % 20 \ - $umask % 10 % 4 + $umask % 2 `;; *) mkdir_umask=$umask,go-w;; esac # With -d, create the new directory with the user-specified mode. # Otherwise, rely on $mkdir_umask. 
if test -n "$dir_arg"; then mkdir_mode=-m$mode else mkdir_mode= fi posix_mkdir=false case $umask in *[123567][0-7][0-7]) # POSIX mkdir -p sets u+wx bits regardless of umask, which # is incompatible with FreeBSD 'install' when (umask & 300) != 0. ;; *) tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$ trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0 if (umask $mkdir_umask && exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1 then if test -z "$dir_arg" || { # Check for POSIX incompatibilities with -m. # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or # other-writeable bit of parent directory when it shouldn't. # FreeBSD 6.1 mkdir -m -p sets mode of existing directory. ls_ld_tmpdir=`ls -ld "$tmpdir"` case $ls_ld_tmpdir in d????-?r-*) different_mode=700;; d????-?--*) different_mode=755;; *) false;; esac && $mkdirprog -m$different_mode -p -- "$tmpdir" && { ls_ld_tmpdir_1=`ls -ld "$tmpdir"` test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1" } } then posix_mkdir=: fi rmdir "$tmpdir/d" "$tmpdir" else # Remove any dirs left behind by ancient mkdir implementations. rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null fi trap '' 0;; esac;; esac if $posix_mkdir && ( umask $mkdir_umask && $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir" ) then : else # The umask is ridiculous, or mkdir does not conform to POSIX, # or it failed possibly due to a race condition. Create the # directory the slow way, step by step, checking for races as we go. case $dstdir in /*) prefix='/';; -*) prefix='./';; *) prefix='';; esac eval "$initialize_posix_glob" oIFS=$IFS IFS=/ $posix_glob set -f set fnord $dstdir shift $posix_glob set +f IFS=$oIFS prefixes= for d do test -z "$d" && continue prefix=$prefix$d if test -d "$prefix"; then prefixes= else if $posix_mkdir; then (umask=$mkdir_umask && $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break # Don't fail if two instances are running concurrently. 
test -d "$prefix" || exit 1 else case $prefix in *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;; *) qprefix=$prefix;; esac prefixes="$prefixes '$qprefix'" fi fi prefix=$prefix/ done if test -n "$prefixes"; then # Don't fail if two instances are running concurrently. (umask $mkdir_umask && eval "\$doit_exec \$mkdirprog $prefixes") || test -d "$dstdir" || exit 1 obsolete_mkdir_used=true fi fi fi if test -n "$dir_arg"; then { test -z "$chowncmd" || $doit $chowncmd "$dst"; } && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } && { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false || test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1 else # Make a couple of temp file names in the proper directory. dsttmp=$dstdir/_inst.$$_ rmtmp=$dstdir/_rm.$$_ # Trap to clean up those temp files at exit. trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0 # Copy the file name to the temp name. (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") && # and set any options; do chmod last to preserve setuid bits. # # If any of these fail, we abort the whole thing. If we want to # ignore errors from any of these, just make sure not to ignore # errors from the above "$doit $cpprog $src $dsttmp" command. # { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } && { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } && { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } && # If -C, don't bother to copy if it wouldn't change the file. if $copy_on_change && old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` && new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` && eval "$initialize_posix_glob" && $posix_glob set -f && set X $old && old=:$2:$4:$5:$6 && set X $new && new=:$2:$4:$5:$6 && $posix_glob set +f && test "$old" = "$new" && $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1 then rm -f "$dsttmp" else # Rename the file to the real destination. 
$doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null || # The rename failed, perhaps because mv can't rename something else # to itself, or perhaps because mv is so ancient that it does not # support -f. { # Now remove or move aside any old file at destination location. # We try this two ways since rm can't unlink itself on some # systems and the destination file might be busy for other # reasons. In this case, the final cleanup might fail but the new # file should still install successfully. { test ! -f "$dst" || $doit $rmcmd -f "$dst" 2>/dev/null || { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null && { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; } } || { echo "$0: cannot unlink or rename $dst" >&2 (exit 1); exit 1 } } && # Now rename the file to the real destination. $doit $mvcmd "$dsttmp" "$dst" } fi || exit 1 trap '' 0 fi done # Local variables: # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC" # time-stamp-end: "; # UTC" # End: skytools-3.2.6/lib/mk/std-autogen.sh0000755000000000000000000000234312166266754014266 0ustar #! /bin/sh # autogen for non-automake trees # # - it installs files: config.sub, config.guess, install-sh # - it installs ltmain.sh, if LT_INIT or *LIBTOOL macro is used # set -e USUAL_DIR="$1" test -n "${USUAL_DIR}" || USUAL_DIR="." test -f "${USUAL_DIR}/m4/usual.m4" || { echo usage: $0 USUAL_DIR exit 1 } # default programs ACLOCAL=${ACLOCAL:-aclocal} AUTOCONF=${AUTOCONF:-autoconf} AUTOHEADER=${AUTOHEADER:-autoheader} # detect first glibtoolize then libtoolize if test "x$LIBTOOLIZE" = "x"; then LIBTOOLIZE=glibtoolize which $LIBTOOLIZE >/dev/null 2>&1 \ || LIBTOOLIZE=libtoolize fi # # Workarounds for libtoolize randomness - it does not update # the files if they exist, except it requires install-sh. # rm -f config.guess config.sub install-sh ltmain.sh libtool cp -p ${USUAL_DIR}/mk/install-sh . 
if ${LIBTOOLIZE} --help | grep "[-][-]install" > /dev/null; then ${LIBTOOLIZE} -i -f -q -c else ${LIBTOOLIZE} -c fi # drop ltmain.sh if libtool is not used grep -E 'LT_INIT|LIBTOOL' configure.ac > /dev/null \ || rm -f ltmain.sh # Now generate configure & config.h ${ACLOCAL} -I ${USUAL_DIR}/m4 grep AC_CONFIG_HEADER configure.ac > /dev/null \ && ${AUTOHEADER} ${AUTOCONF} # clean junk rm -rf autom4te.* aclocal* skytools-3.2.6/lib/mk/antimake.txt0000644000000000000000000003216312166266754014032 0ustar = antimake.mk(5) = == NAME == antimake - Minimal Automake syntax on plain GNU Make == DESCRIPTION == Antimake makes possible to use GNU Automake conventions to describe builds in ordinary Makefiles for GNU Make. It's main abstractions are target lists and target variables. Target list describes target type and where to install. Target variables give source files and additional flags for build. == EXAMPLE == ------------------- # target list bin_PROGRAMS = prog # target variables for 'prog' prog_SOURCES = prog.c prog.h prog_LDADD = libutil.a # target list noinst_LIBRARIES = libutil.a # target variables for 'libutil.a' libutil_a_SOURCES = util.c util.h # load Antimake include antimake.mk ------------------- == Terminology == Primary:: target type, describes how to build and install particular type of targets. Target:: a file that needs to be built and/or installed. Distribute:: Include file in source .tar.gz. Non-distributed files are skipped when building .tar.gz and are cleaned during `make distclean`. Source:: Source files are files that appear in `..._SOURCES` per-target variable. They are distributed by default. They may or may not result in object files. It's fine to put both `.h` and `.c` files into _SOURCES. == TARGET LISTS == Target lists are variables that contain file names that need to be built and installed. They are specially named so that the name also describes how they are built, how and where they will be installed. 
The target list name contains 3 parts, separated with underscore, in following order: 1. Optional flags. Flags are: `nodist`, `dist`, `nobase`, `base`. (Default: `base`, `nodist`) 2. Destination directory name. Destination directory called *bin* actual location is stored in Make variable `$(bindir)`. Some common values: `bin`, `lib`, `include`. There are more and the list can be extended. Special name `noinst` means the target file should not be installed. 3. Target type, also called "primary". This will describe how the target needs to be built. Common values: `PROGRAMS`, `LIBRARIES`, `DATA` For details, what the various values mean, see next sections. .Examples: ---------------- bin_PROGRAMS = prog1 prog2 # flags: base, nodist # dest: $(bindir) # type: PROGRAMS noinst_LIBRARIES = lib1.a lib2.a # flags: base, nodist # dest: noinst # type: LIBRARIES nobase_dist_doc_DATA = docs/README # flags: dist, nobase # dest: $(docdir)/docs # type: DATA ---------------- === Primaries === `PROGRAMS`:: executable programs, linked together from objects built from source files `LIBARIES`:: static libraries, linked together from objects built from source files `LTLIBRARIES`:: dynamic or static libraries, linked together from objects built from source files `HEADERS`:: header files, no default build method, the target files have `dist` flag by default. `MANS`:: man pages, no default build method, installed into manX subdir. `SCRIPTS`:: scripts, executable file, no default build method `DATA`:: data, non-executable file, no default build method === Target list flags === `dist`:: The target should be distributed with other sources. Default for `HEADERS` type, others have `nodist` by default. `nodist`:: Target is not distributed and should be cleaned with distclean. Default for all primaries, except `HEADERS`. `base`:: On install relative path is ignored, all files end up in destination directory. Always default. `nobase`:: On install relative path is kept. 
Eg: if `includedir=/usr/include` then `nobase_include_HEADERS=mylib/common.h` is installed to `/usr/include/mylib/common.h`. `noinst`:: Target is built as part of build process, but is not installed. `EXTRA`:: Targets in such list are not built, nor installed. Useful to make sure that sources for dynamically configured targets will end up in source tarball. Unlike other target list ariables, `EXTRA_` may contain targets already defined in other target lists, they will be filtered out from this list then. == Target variables == Only big targets take additional variables: `PROGRAMS`/`LIBRARIES`/`LTLIBRARIES`. `_SOURCES`:: All source files, *.c *.h *.cpp *.hpp. `nodist__SOURCES`:: Source files that should not be distributed. `EXTRA__SOURCES`:: In case tgt_SOURCES is dynamic, here is non-dynamic list of sources for distribution. Only dynamic sources need to be listed here. `_DEPENDENCIES`:: Add dependencies that need to be build before target build will start. `_CFLAGS`, `_CPPFLAGS`, `_LDFLAGS`, `_LIBTOOLFLAGS`:: Override corresponging AM_xx variable `_LDADD`:: Add dependencies that are used during linking. For PROGRAMS only. They will be added to linker command line. `_LIBADD`:: Add dependencies that are used during linking. For LIBRARIES/LTLIBRARIES only. They will be added to linker command line. `_AR`:: Overrides $(AR) $(ARFLAGS). For LIBRARIES only. .Example: ------------------- bin_PROGRAMS = prog prog_SOURCE = main.c util.c util.h prog_CFLAGS = $(GTK_CFLAGS) prog_LDADD = $(GTK_LIBS) ------------------- == Global variables == They can be set before `antimake.mk` inclusion to change build behaviour. EXTRA_DIST:: Additional files to include in source archive. CLEANFILES:: Additional files to `make clean`. DISTCLEANFILES:: Additional files to `make distclean`. MAINTAINERCLEANFILES:: Additional files to `make maintainer-clean`. SUBDIRS:: Subdirectories of current directory where Make needs to be recursively launched. 
If subdirectory `Makefile` is Antimake-base, it should set `SUBLOC`. SUBLOC:: Current diretory location in overall source tree. This can stay unset in top directory. Needed for subdirectiories entered with `SUBDIRS` to find its position in source tree. DIST_SUBDIRS:: Subdirs that only `make dist`, `make distclean` and `make maintainer-clean` will enter. EMBED_SUBDIRS:: Subdirectories that are built non-recursively: they need to contain `Makefile.am` that contains makefile-fragment with Antimake syntax that describes local targets using relative filenames. The fragment is included in main makefile and file and variable names are converted and merged with top-level targets. AM_FEATURES:: List of extensions to load. Extensions are Makefile fragments that are loaded before actual rules are generated, so they can change or add targets. === More details on EMBED_SUBDIRS === It acts like `include $(dir)/Makefile.am` for each directory, except it converts file and variable names. Example: --------------------- Makefile: EMBED_SUBDIRS = src src/Makefile.am: bin_PROGRAMS = hello hello_SOURCES = main.c hello_CPPFLAGS = -I./include --------------------- Conversion results as if top-level `Makefile` had contained following rows: ---------------------- bin_PROGRAMS += src/hello src_hello_SOURCES = src/main.c src_hello_CPPFLAGS = -I./src/include ---------------------- Variables, where file names are converted: * SUBDIRS, DIST_SUBDIRS, EMBED_SUBDIRS * DISTFILES, CLEANFILES, DISTCLEANFILES, MAINTAINERCLEANFILES * target lists * _SOURCES, _LDADD, _LIBADD Variables, where -L and -I flags are converted: * _CFLAGS * _CPPFLAGS * _LDFLAGS Makefile should be written in a way that those conversions would be enough. === Global variables for current location === * srcdir, builddir - relative path to source dir and build dir. * top_srcdir, top_builddir - relative path to top-level source and build dir. 
* abs_srcdir, abs_builddir - absolute path to source and build dir * abs_top_srcdir, abs_top_builddir - absolute path to top-level source and build dir * nosub_top_srcdir, nosub_top_builddir - relative path from top of builddir to srcdir and builddir. === Global variables that target can override === - AM_CPPFLAGS - AM_CFLAGS - AM_LDFLAGS - AM_LIBTOOLFLAGS - AM_DEFS - AM_MAKEFLAGS === Global variables from autoconf === These variables come usually from autoconf, but also have reasonable defaults: CC, DEFS, CPPFLAGS, CFLAGS, LDFLAGS, LIBS, LIBTOOL, LIBTOOLFLAGS, AR, ARFLAGS, RANLIB, CXX, CXXFLAGS, INSTALL, MKDIR_P, LN_S === Global variables for extending Antimake === AM_DIST_DEFAULT:: Default format(s) for `make dist` target. One or more of: `gzip`, `bzip2`, `xz`, `zip`. Default: `gzip`. AM_DESTINATIONS:: Additional directory names to consider as valid destinations. Expects corresponding `dir`-variable to be set. AM_SMALL_PRIMARIES:: Additional single-file primaries. (Builtin: HEADERS, SCRIPTS, DATA, MANS) AM_BIG_PRIMARIES:: Additional primaries built from objects. (Builtin: PROGRAMS, LIBRARIES, LTLIBRARIES) AM_LANGUAGES:: Additional language names. Antimake expects variables `AM_LANG_$(name)_SRCEXTS`, `AM_LANG_$(name)_COMPILE` and `AM_LANG_$(name)_LINK` to be set. === Variables for command-line usage === DESTDIR:: Relocate installation root. AM_TRACE:: Turns on function-call debug info. Can be set from command-line. === Hacking variables === GNUMAKE380, GNUMAKE381, GNUMAKE382:: If we have at least that version of GNU Make. GNUMAKE380 is always set, others may not be. If Makefile uses features from newer GNU Make it would be good idea to use those flags and error out with clear error message, instead having mysterious failures. 
=== Libtool flags === Useful http://www.gnu.org/software/libtool/manual/html_node/Link-mode.html[Libtool] flags that can be put int tgt_LDFLAGS for a LTLIBRARY: * -export-dynamic * -export-symbols symfile * -export-symbols-regex regex * -module See libtool http://www.gnu.org/software/libtool/manual/html_node/Versioning.html["Versioning"] chapter about those: * -avoid-version * -version-info current[:revision[:age]] * -version-number major[:minor[:revision]] * -release major[:minor[:revision]] == Top-level pseudo-targets == === all === The default target when no other target is given on command-line. Builds all target files. ==== Simple targets ==== These are simple - either the file already exists, or the user needs to give build command. ==== Object-based targets ==== The targets in primaries PROGRAMS, LIBRARIES and LTLIBRARIES consist of multiple source files that need to be compiled into objects. Then the objects need to be linked into final target. The process is roughly following: . Dependencies are built (_LDADD, _LIBADD, _DEPENDENCIES). . Source list is filtered for extensions that can be compiled into object files, object file list is created based on them. The rest of files are used and dependencies for target, but otherwise ignored. . Object files are built. . Linker is picked based on source files - as there can be files in multiple languages, the most advanced language wins (the one that appears later in `AM_LANGUAGES`) . Final executable is linked. === install === Install all targets to their destination directories, which is mentioned in their target list variable name. Eg. `bin_PROGRAMS` will be installed to `$(bindir)`. If destination is named `noinst`, it will not be installed. If the flag `nobase` is given, the relative filename is kept, otherwise basename is taken and it will appear directly under destination directory. 
.Example: ------ include_HEADERS = func1.h lib/func2.h # Files will end up in: # $(includedir)/func1.h # $(includedir)/func2.h nobase_include_HEADERS = func1.h lib/func2.h # Files will end up in: # $(includedir)/func1.h # $(includedir)/lib/func2.h ------ === clean === - Remove files in `$(CLEANFILES)` - Remove built objects. - Remove target files, unless they are marked as `dist`. (Note: `HEADERS` primary is `dist` by default, all other are `nodist`) === distclean === - Remove files in `$(DISTCLEANFILES)` - Remove sources tagged with `nodist`. All sources as `dist` by default. === maintainer-clean === - Remove files in `$(MAINTAINERCLEANFILES)` === help === Describe top-level targets. === am-test === Regression test for low-level Antimake functions. === am-debug === Show Antimake internal state. == FEATURES == Done: - Big primaries: PROGRAMS, LIBRARIES, LTLIBRARIES - Small primaries: DATA, SCRIPTS, MANS, HEADERS - Flags: base nobase dist nodist noinst EXTRA - Target vars: SOURCES, CPPFLAGS, CFLAGS, LDFLAGS, LDADD/LIBADD - Separate build dir - Per-target objects - Languages: C, CXX - SUBDIRS, DIST_SUBDIRS - EMBED_SUBDIRS Todo: - Improve docs - Standardize and document how to extend - Deps with non-gcc? - Long if-s to support `O=` seems to break GNU Make 3.80. Drop `O=` or drop 3.80? Probably out of scope: - `make uninstall` - `make distcheck` - `make dist` from separate build dir - `install-(exec|data)-hook` - based on dir not primary - Default file list for `EXTRA_DIST`. 
(Problem: distclean / maintainer-clean) Definitely out of scope: - automake conditionals - automake extras (autoconf macros, ltdl) - automake nanny mode (gnu/gnits) == SEE ALSO == GNU Make Reference: http://www.gnu.org/software/make/manual/make.html#Quick-Reference[] Recursive Make Considered Harmful: http://miller.emu.id.au/pmiller/books/rmch/[] Paul's Rules of Makefiles: http://make.mad-scientist.us/rules.html[] Small BSD-ish build system: https://webkeks.org/hg/buildsys/[] GNU Make Standard Library: http://sourceforge.net/projects/gmsl/[] skytools-3.2.6/lib/mk/Makefile.am0000644000000000000000000000032712166266754013531 0ustar pkgconfig_DATA = libusual.pc dist_pkgdata_SCRIPTS = antimake.mk std-autogen.sh amext-libusual.mk amext-modes.mk amext-msvc.mk amext-cxx.mk DISTCLEANFILES = libusual.pc EXTRA_DIST = antimake.txt safe-headers.sed skytools-3.2.6/lib/mk/temos/0000755000000000000000000000000012166266754012622 5ustar skytools-3.2.6/lib/mk/temos/libtemo.sh0000644000000000000000000000314712166266754014616 0ustar LANG=C LC_ALL=C export LANG LC_ALL PATH=`pwd`/bin:$PATH export PATH set -e set -o pipefail SH="bash" unset MAKELEVEL MAKEFLAGS export MAKELEVEL MAKEFLAGS code=0 # we want to test local commits real_repo=../../.. 
# but final html should have fixed public url show_repo=git://github.com/libusual/libusual.git usual_clone() { enter_code echo "$ git clone $show_repo" "$@" git clone $real_repo "$@" } test_start() { rm -rf tmp mkdir tmp cd tmp } enter_code() { if test "$code" = "0"; then echo "---------------------------------" code=1 fi } leave_code() { if test "$code" = "1"; then echo "---------------------------------" code=0 fi } ls() { /bin/ls -C "$@" } title() { leave_code echo "" echo "=" "$@" "=" echo "" } title2() { leave_code echo "" echo "==" "$@" "==" echo "" } title3() { leave_code echo "" echo "===" "$@" "===" echo "" } run() { enter_code echo "$ $*" case "$1" in cd|ls|export) $* ;; *) $SH -c "$*" 2>&1 esac } runq() { enter_code echo "$ $*" echo "[...]" $SH -c "$*" > quiet.log 2>&1 || { tail -5 quiet.log; exit 1; } rm -f quiet.log } msg() { leave_code echo "" echo "$@" echo "" } longmsg() { leave_code echo "" sed 's/^ //' echo "" } cat_file() { leave_code mkdir -p `dirname $1` echo ".File: $1" case "$1" in *Makefile) echo "[source,makefile]" ;; *.[ch]) echo "[source,c]" ;; *.ac) echo "[source,autoconf]" ;; *.sh) echo "[source,shell]" ;; esac echo "-----------------------------------" sed 's/^ //' > $1 cat $1 echo "-----------------------------------" } skytools-3.2.6/lib/mk/temos/src/0000755000000000000000000000000012166266754013411 5ustar skytools-3.2.6/lib/mk/temos/src/antimake3.temo0000644000000000000000000000725612166266754016165 0ustar . ./libtemo.sh || exit 1 test_start title Test Antimake EMBED_SUBDIRS longmsg <<-"MSG" Antimake variable `EMBED_SUBDIRS` list names of directories that contains Makefile fragmants that are to be embedded into current Makefile. - Plus: Proper dependencies, work well with parallel Make. - Minus: Cannot go into subdir and run make there. - Minus: Fragments are not stand-alone, so need some care when writing. 
MSG title2 Intro to EMBED_SUBDIRS longmsg <<-"MSG" To better understand what EMBED_SUBDIRS does, let\'s start with simple case - single Makefile that references files under subdir: MSG run mkdir -p src run cp ../../antimake.mk . cat_file Makefile <<"EOF" bin_PROGRAMS = src/myprog src_myprog_SOURCES = src/myprog.c include antimake.mk EOF cat_file src/myprog.c <<"EOF" #include int main(void) { printf("myprog\n"); return 0; } EOF run make run ./src/myprog run make clean longmsg <<"MSG" Now you can put the lines that reference files under `src/` also into `src` and include that from top-level Makefile: MSG cat_file src/Makefile.inc <<"EOF" bin_PROGRAMS = src/myprog src_myprog_SOURCES = src/myprog.c EOF cat_file Makefile <<"EOF" include src/Makefile.inc include antimake.mk EOF run make run ./src/myprog longmsg <<"MSG" This works but the problem is that although the Makefile is local, it still sees files from top-Makefile-level. So that is what `EMBED_SUBDIRS` helps with - it allow to use local filenames in Makefile fragment, and it converts them to top-level filenames when including. It knows only few type of variables it needs to convert: - target filenames in primares lists (*_PROGRAMS, *_LIBRARIES, etc) - target_SOURCES: foo_SOURCES -> sub_dir_foo_SOURCES with filename conversion - other target variables: `foo_*` -> `sub_dir_foo_*` without filename conversion - EXTRA_DIST, CLEANFILES, DISTCLEANFILES, MAINTAINERCLEANFILES Any other variables stay untouched, and obviously they can mess up top-level variables. So the included Makefile should be as clean as possible. MSG title2 Setup source tree for EMBED_SUBDIRS msg Setup directories, install Antimake run mkdir -p lib1/sublib lib2 run cp ../../antimake.mk . 
msg Prepare sources cat_file main.c <<"EOF" #include void func1(void); void func2(void); void func3(void); int main(void) { func1(); func2(); func3(); printf("main\n"); return 0; } EOF cat_file lib1/func1.c <<"EOF" #include void func1(void) { printf("func1\n"); } EOF cat_file lib1/sublib/func2.c <<"EOF" #include void func2(void) { printf("func2\n"); } EOF cat_file lib2/func3.c <<"EOF" #include void func3(void) { printf("func3\n"); } EOF msg Prepare Makefiles cat_file Makefile <<"EOF" PACKAGE_NAME = test-subdirs PACKAGE_VERSION = 1.0 EMBED_SUBDIRS = lib1 lib1/sublib lib2 bin_PROGRAMS = prog prog_SOURCES = main.c prog_LDADD = lib1/func1.a lib1/sublib/func2.a lib2/func3.a EXTRA_DIST = Makefile antimake.mk include antimake.mk EOF cat_file lib1/Makefile.am <<"EOF" noinst_LIBRARIES = func1.a func1_a_SOURCES = func1.c EXTRA_DIST = Makefile.am EOF cat_file lib1/sublib/Makefile.am <<"EOF" noinst_LIBRARIES = func2.a func2_a_SOURCES = func2.c EXTRA_DIST = Makefile.am EOF cat_file lib2/Makefile.am <<"EOF" noinst_LIBRARIES = func3.a func3_a_SOURCES = func3.c EXTRA_DIST = Makefile.am EOF title2 Building msg Build the project run make run ls run ./prog msg We can now install it: run make install prefix=/opt DESTDIR=./inst run ls ./inst/opt/bin msg Now we can create package that can be given to others. run make dist run ls run 'tar tzf test-subdirs-1.0.tar.gz | sort' msg Clean the tree run make clean run ls msg Done! skytools-3.2.6/lib/mk/temos/src/antimake1.temo0000644000000000000000000000267312166266754016161 0ustar . ./libtemo.sh || exit 1 test_start title Test Antimake title2 Simplest usage msg Here we avoid use of any autotools. msg First, this is the source file: cat_file hello.c <<"EOF" #include int main(void) { printf("Hello, world\n"); return 0; } EOF msg Here is corresponding Makefile: cat_file Makefile <<"EOF" # This is target list - it's name describes target type # and how it is installed, it's value target files to be built. 
# bin - the targets will be installed under $(bindir) # PROGRAMS - the target is executable built from many sources bin_PROGRAMS = hello # The target 'hello'-s source file list. hello_SOURCES = hello.c # Run Antimake include antimake.mk EOF msg Also install Antimake and we are ready to build: run cp ../../antimake.mk . run ls msg Build the project run make run ls run ./hello msg We can even install it already: run make install prefix=/opt DESTDIR=./inst run ls ./inst/opt/bin msg For creating source package, we need to provide additional info: cat_file Makefile <<"EOF" # Package name and version for tarball filename PACKAGE_NAME = myhello PACKAGE_VERSION = 1.0 # Non-source files to put into tarball EXTRA_DIST = Makefile antimake.mk bin_PROGRAMS = hello hello_SOURCES = hello.c include antimake.mk EOF msg Now we can create package that can be given to others. run make dist run ls run 'tar tzf myhello-1.0.tar.gz | sort' msg Clean the tree run make clean run ls msg Done! skytools-3.2.6/lib/mk/temos/src/libusual1.temo0000644000000000000000000000241712166266754016204 0ustar . ./libtemo.sh || exit 1 test_start title Use libusual the simplest way longmsg <<-MSG Simplest usage would be to configure and build libusual locally and point your projects CPPFLAGS and LDFLAGS there. That way you get access to not only code but also various autoconfigued symbols without any complexities in your project. MSG title2 Build libusual usual_clone lib run cd lib runq ./autogen.sh runq ./configure --disable-shared --prefix=/opt runq make runq make install 'DESTDIR=`pwd`/../inst' run cd .. title2 Build our own code msg Now we prepare our own code. 
msg First, this is the source file: cat_file prog.c <<"EOF" #include #include #include int main(void) { const char *data = "CECSFXX"; uint32_t crc; crc = calc_crc32(data, strlen(data), 0); printf("crc: %08x\n", crc); return 0; } EOF msg Here is corresponding Makefile: cat_file Makefile <<"EOF" # here we describe our program SRCS = prog.c OBJS = $(SRCS:.c=.o) # here we link to libusual CPPFLAGS = -I./inst/opt/include LDFLAGS = -L./inst/opt/lib LIBS = -lusual CC = gcc CFLAGS = -O -g -Wall all: prog prog: $(OBJS) $(CC) $(CFLAGS) $(LDFLAGS) $(OBJS) $(LIBS) -o $@ EOF msg Build the project run make run ls run ./prog msg Done! skytools-3.2.6/lib/mk/temos/src/libusual3.temo0000644000000000000000000000342112166266754016202 0ustar . ./libtemo.sh || exit 1 test_start title Using Autoconf and embedded libusual longmsg <<-"MSG" MSG title2 Fetch libusual msg Here we close libusual repo, but do not configure nor build it. usual_clone lib msg Autoconf setup cat_file autogen.sh <<"EOF" # use prepared autgen logic ./lib/mk/std-autogen.sh ./lib # fetch Antimake template from libusual cp lib/mk/antimake.mk antimake.mk.in EOF cat_file configure.ac <<"EOF" AC_INIT([achello], [0.1], [https://libusual.github.com]) AC_CONFIG_SRCDIR([prog.c]) AC_CONFIG_HEADER([lib/usual/config.h]) AC_PREREQ([2.59]) AC_USUAL_PORT_CHECK AC_USUAL_PROGRAM_CHECK AC_USUAL_HEADER_CHECK AC_USUAL_TYPE_CHECK AC_USUAL_FUNCTION_CHECK AC_OUTPUT([antimake.mk]) EOF msg Here is the source that needs to be linked with libusual: cat_file prog.c <<"EOF" #include #include #include int main(void) { const char *data = "CECSFXX"; uint32_t crc; crc = calc_crc32(data, strlen(data), 0); printf("crc: %08x\n", crc); return 0; } EOF msg Antimake based Makefile cat_file Makefile <<"EOF" # the automake-style build description for 'prog' noinst_PROGRAMS = prog prog_SOURCES = prog.c # location of configured libusual USUAL_DIR = lib # mention that 'prog' wants embedded libusual prog_EMBED_LIBUSUAL = 1 AM_FEATURES = libusual # clean 
configured files DISTCLEANFILES = config.status config.log \ antimake.mk $(USUAL_DIR)/usual/config.h # clean generated files MAINTAINERCLEANFILES = configure config.guess config.sub install-sh \ antimake.mk.in $(USUAL_DIR)/usual/config.h.in # launch Antimake include $(USUAL_DIR)/mk/antimake.mk EOF msg Build the project run sh ./autogen.sh runq ./configure run make run ls run ./prog run make maintainer-clean run ls msg Done skytools-3.2.6/lib/mk/temos/src/libusual4.temo0000644000000000000000000000303512166266754016204 0ustar . ./libtemo.sh || exit 1 test_start title Use installed libusual longmsg <<-MSG Install libusual and link against it. MSG title2 Build libusual usual_clone libusual run cd libusual runq ./autogen.sh runq './configure --disable-static --prefix=`pwd`/../inst' runq make runq make install run cd .. run 'find inst | sort' title2 Build our own code msg Now we prepare our own code. msg First, this is the source file: cat_file prog.c <<"EOF" #include #include #include int main(void) { const char *data = "CECSFXX"; uint32_t crc; crc = calc_crc32(data, strlen(data), 0); printf("crc: %08x\n", crc); return 0; } EOF msg Here is corresponding Makefile: cat_file Makefile <<"EOF" CC = gcc CFLAGS = -O -g -Wall # here we describe our program SRCS = prog.c OBJS = $(SRCS:.c=.o) # put libusual flags to proper place CPPFLAGS = $(USUAL_CPPFLAGS) LIBS = $(USUAL_LIBS) # use pkg-config to get libusual info USUAL_CPPFLAGS = $(shell $(PKG_CONFIG) --cflags libusual) USUAL_LIBS = $(shell $(PKG_CONFIG) --libs libusual) # temo hacks to support local install, not needed otherwise PKG_CONFIG := PKG_CONFIG_PATH=$(CURDIR)/inst/lib/pkgconfig pkg-config CPPFLAGS := $(subst $(CURDIR)/libusual/../,./,$(CPPFLAGS)) LIBS := $(subst $(CURDIR)/libusual/../,./,$(LIBS)) all: prog %.o: %.c $(CC) $(CFLAGS) $(CPPFLAGS) -c -o $@ $< prog: $(OBJS) $(CC) $(CFLAGS) $(LDFLAGS) $(OBJS) $(LIBS) -o $@ EOF msg Build the project run make run ls run LD_LIBRARY_PATH=./inst/lib ./prog msg Done! 
skytools-3.2.6/lib/mk/temos/src/libusual2.temo0000644000000000000000000000473412166266754016211 0ustar . ./libtemo.sh || exit 1 test_start title Embedding libusual as part of the source. longmsg <<-"MSG" Here we build libusual as part of top-level tree. This gives the advantage of building only the modules that are actually used in main tree and without the intermediate `libusual.a` step. This method is for projects that are developed in parallel with libusual. Not recommended for casual usage. MSG title2 Configure libusual msg Here we configure libusual, but do not build it. usual_clone lib run cd lib runq ./autogen.sh runq ./configure run cd .. title2 Prepare own code msg Here is the source that needs to be linked with libusual: cat_file prog.c <<"EOF" #include #include #include int main(void) { const char *data = "CECSFXX"; uint32_t crc; crc = calc_crc32(data, strlen(data), 0); printf("crc: %08x\n", crc); return 0; } EOF title2 Old way, with plain Make msg Here is corresponding Makefile: cat_file Makefile <<"EOF" CC = gcc CFLAGS = -O -g -Wall # here we describe our program SRCS = prog.c OBJS = $(SRCS:.c=.o) # here we link to libusual USUAL_DIR = ./lib USUAL_LOCAL_SRCS = $(SRCS) CPPFLAGS = $(USUAL_CPPFLAGS) OBJS += $(USUAL_OBJS) # this looks what modules are used by files in USUAL_LOCAL_SRCS # and fills the USUAL_OBJS variable based on that include $(USUAL_DIR)/Setup.mk all: prog prog: $(OBJS) $(CC) $(CFLAGS) $(LDFLAGS) $(OBJS) $(LIBS) -o $@ %.o: %.c $(CC) $(CFLAGS) $(CPPFLAGS) -c -o $@ $< # special rule to build %.o: $(USUAL_DIR)/usual/%.c $(CC) $(CFLAGS) $(CPPFLAGS) -c -o $@ $< clean: rm -f *.o prog EOF msg Build the project run make run ls run ./prog run make clean run ls longmsg <<-"MSG" It's quite complex and that is even without trying to get dependencies rigth. See next section for preferred way. MSG title2 New way, with Antimake. longmsg <<-"MSG" Antimake is build framework on plain GNU Make that reads build instructons with Automake syntax. 
It has also hooks for libusual integration. Here is Makefile that uses Antimake: MSG cat_file Makefile <<"EOF" # the automake-style build description for 'prog' noinst_PROGRAMS = prog prog_SOURCES = prog.c # location of configured libusual USUAL_DIR = lib # mention that 'prog' wants embedded libusual prog_EMBED_LIBUSUAL = 1 AM_FEATURES = libusual # launch Antimake include $(USUAL_DIR)/mk/antimake.mk EOF msg Build the project run make run ls run ./prog run make clean run ls msg Done skytools-3.2.6/lib/mk/temos/src/libusual6.temo0000644000000000000000000000465212166266754016214 0ustar . ./libtemo.sh || exit 1 test_start title Use installed libusual with autoconf and antimake. longmsg <<-MSG Install libusual and link against it. MSG title2 Build libusual usual_clone libusual run cd libusual runq ./autogen.sh runq './configure --disable-shared --prefix=`pwd`/../inst' runq make runq make install run cd .. title2 Build our own code msg Now we prepare our own code. msg First, this is the source file: cat_file prog.c <<"EOF" #include #include #include int main(void) { const char *data = "CECSFXX"; uint32_t crc; crc = calc_crc32(data, strlen(data), 0); printf("crc: %08x\n", crc); return 0; } EOF msg Autoconf setup cat_file autogen.sh <<"EOF" # use prepared autgen logic ../../std-autogen.sh ../../.. 
# fetch Antimake template from libusual cp ../../antimake.mk antimake.mk.in EOF cat_file extra.mk.in <<"EOF" USUAL_ANTIMAKE = @USUAL_ANTIMAKE@ USUAL_CFLAGS = @USUAL_CFLAGS@ USUAL_LIBS = @USUAL_LIBS@ EOF cat_file configure.ac <<"EOF" AC_INIT([achello], [0.1], [https://libusual.github.com]) AC_CONFIG_HEADER([config.h]) AC_CONFIG_SRCDIR([prog.c]) AC_PREREQ([2.59]) AC_USUAL_INIT AC_USUAL_PROGRAM_CHECK PKG_CHECK_MODULES(USUAL, libusual) _PKG_CONFIG([USUAL_ANTIMAKE], [variable=antimake], [libusual]) AC_SUBST([USUAL_ANTIMAKE]) AC_OUTPUT([antimake.mk extra.mk]) EOF msg Here is the source that needs to be linked with libusual: cat_file prog.c <<"EOF" #include #include #include int main(void) { const char *data = "CECSFXX"; uint32_t crc; crc = calc_crc32(data, strlen(data), 0); printf("crc: %08x\n", crc); return 0; } EOF msg Antimake based Makefile cat_file Makefile <<"EOF" # the automake-style build description for 'prog' noinst_PROGRAMS = prog prog_SOURCES = prog.c prog_CPPFLAGS = $(USUAL_CFLAGS) prog_LDADD = $(USUAL_LIBS) # clean configured files DISTCLEANFILES = config.status config.log extra.mk antimake.mk config.h # clean generated files MAINTAINERCLEANFILES = configure config.guess config.sub install-sh \ antimake.mk.in extra.mk.in config.h.in EXTRA_DIST = Makefile $(MAINTAINERCLEANFILES) # launch Antimake include extra.mk include antimake.mk EOF msg Build the project run sh ./autogen.sh runq 'PKG_CONFIG_PATH=`pwd`/inst/lib/pkgconfig ./configure' run make run ls run ./prog run make am-show-distfiles run make maintainer-clean run ls msg Done! skytools-3.2.6/lib/mk/temos/src/antimake5.temo0000644000000000000000000000344712166266754016165 0ustar . ./libtemo.sh || exit 1 test_start title Shared libraries and autoconf msg Autoconf setup cat_file autogen.sh <<"EOF" ../../std-autogen.sh ../../.. 
# fetch Antimake template from libusual cp ../../antimake.mk antimake.mk.in EOF cat_file configure.ac <<"EOF" AC_INIT([actest], [0.1]) AC_CONFIG_SRCDIR([prog.c]) AC_PREREQ([2.59]) LT_INIT AC_USUAL_INIT AC_USUAL_PROGRAM_CHECK AC_OUTPUT([antimake.mk]) EOF msg Here are the source files: cat_file prog.c <<"EOF" void func1(void); int main(void) { func1(); return 0; } EOF cat_file func.c <<"EOF" #include void func1(void); void func1(void) { printf("hello from func1\n"); } EOF msg Antimake based Makefile cat_file Makefile <<"EOF" lib_LTLIBRARIES = libtemo.la libtemo_la_SOURCES = func.c libtemo_la_LDFLAGS = -version-info 3:0:2 bin_PROGRAMS = prog prog_SOURCES = prog.c prog_LDADD = libtemo.la # clean configured files DISTCLEANFILES = \ config.status \ config.log \ antimake.mk \ libtool # clean generated files MAINTAINERCLEANFILES = \ configure \ config.guess \ config.sub \ install-sh \ antimake.mk.in \ ltmain.sh EXTRA_DIST = \ Makefile \ $(MAINTAINERCLEANFILES) # launch Antimake include antimake.mk EOF msg Build the project run sh ./autogen.sh runq ./configure run make run ls run ./prog msg Create distribution package run make dist run 'tar tzf actest-0.1.tar.gz | sort' msg Test installation run 'make install DESTDIR=/tmp/test-inst' run ls run 'find /tmp/test-inst | sort' run rm -rf /tmp/test-inst msg Test the distribution package and separate build dir run mkdir -p test run cd test run tar xf ../actest-0.1.tar.gz run mkdir build run cd build runq ../actest-0.1/configure run make run ls run ./prog run cd ../.. msg Clean up run make maintainer-clean run ls msg Done skytools-3.2.6/lib/mk/temos/src/antimake6.temo0000644000000000000000000001043112166266754016155 0ustar . ./libtemo.sh || exit 1 test_start title Antimake stress-test msg Autoconf setup cat_file autogen.sh <<"EOF" ../../std-autogen.sh ../../.. 
# fetch Antimake template from libusual make -f ../../antimake.mk show-config > build.mk.in echo 'include $(abs_top_srcdir)/antimake.mk' >> build.mk.in ln -sf ../../antimake.mk . ln -sf ../../amext-cxx.mk . EOF cat_file configure.ac <<"EOF" AC_INIT([actest], [0.1]) AC_CONFIG_SRCDIR([esub/prog.c]) AC_PREREQ([2.59]) AC_USUAL_INIT LT_INIT AC_USUAL_PROGRAM_CHECK AC_PROG_CXX AC_OUTPUT([build.mk]) EOF msg Here are the source files: longmsg <<"EOF" ------------- ./prog.c ./cpptest.cpp ./sub/func1.c ./sub/esub/func2,c ./sub/sub/func3.c ./esub/func4.c ./esub/esub/func5.c ./esub/sub/func6.c ------------- EOF run mkdir -p sub/esub sub/sub esub/esub esub/sub cat_file esub/prog.c <<"EOF" #include "func1.h" #include "func2.h" #include "func3.h" #include "func4.h" #include "func5.h" #include "func6.h" #include int main(void) { printf("%s\n", __FILE__); func1(); func2(); func3(); func4(); func5(); func6(); return 0; } EOF cat_file cpptest.cpp <<"EOF" #include using namespace std; int main(void) { cout << "Hello" << endl; return 0; } EOF cat_file sub/func1.c <<"EOF" #include #include "func1.h" void func1(void) { printf("%s\n", __FILE__); } EOF cat_file sub/func1.h <<"EOF" void func1(void); EOF run 'sed s/1/2/ sub/func1.c > sub/esub/func2.c' run 'sed s/1/2/ sub/func1.h > sub/esub/func2.h' run 'sed s/1/3/ sub/func1.c > sub/sub/func3.c' run 'sed s/1/3/ sub/func1.h > sub/sub/func3.h' run 'sed s/1/4/ sub/func1.c > esub/func4.c' run 'sed s/1/4/ sub/func1.h > esub/func4.h' run 'sed s/1/5/ sub/func1.c > esub/sub/func5.c' run 'sed s/1/5/ sub/func1.h > esub/sub/func5.h' run 'sed s/1/6/ sub/func1.c > esub/esub/func6.c' run 'sed s/1/6/ sub/func1.h > esub/esub/func6.h' msg Now fill makefiles cat_file Makefile <<"EOF" SUBDIRS = sub EMBED_SUBDIRS = esub AM_FEATURES = cxx override WFLAGS = -Wall EXTRA_DIST = Makefile antimake.mk amext-cxx.mk $(MAINTAINERCLEANFILES) # clean configured files DISTCLEANFILES = \ config.status \ config.log \ libtool # clean generated files MAINTAINERCLEANFILES = \ 
configure \ config.guess \ config.sub \ install-sh \ build.mk.in \ ltmain.sh noinst_PROGRAMS = cpptest cpptest_SOURCES = cpptest.cpp # launch Antimake include build.mk EOF cat_file sub/Makefile <<"EOF" SUBLOC = sub SUBDIRS = sub EMBED_SUBDIRS = esub noinst_LIBRARIES = libfunc1.a libfunc1_a_SOURCES = func1.c func1.h EXTRA_DIST = Makefile include ../build.mk EOF cat_file sub/sub/Makefile <<"EOF" SUBLOC = sub/sub EXTRA_DIST = Makefile noinst_LIBRARIES = libfunc3.a libfunc3_a_SOURCES = func3.c func3.h include ../../build.mk EOF cat_file sub/esub/Makefile.am <<"EOF" noinst_LIBRARIES = libfunc2.a libfunc2_a_SOURCES = func2.c func2.h EXTRA_DIST = Makefile.am EOF cat_file esub/Makefile.am <<"EOF" SUBDIRS = sub EMBED_SUBDIRS = esub EXTRA_DIST = Makefile.am noinst_LIBRARIES = libfunc4.a libfunc4_a_SOURCES = func4.c func4.h noinst_PROGRAMS = prog prog_SOURCES = prog.c prog_LDFLAGS = -L../sub -L../sub/esub -L. -Lsub prog_LDADD = \ -lfunc1 \ -lfunc2 \ $(topdir)/sub/sub/libfunc3.a \ -lfunc4 \ -lfunc5 \ esub/libfunc6.a prog_CFLAGS = -I../sub prog_CPPFLAGS = \ -I../sub/esub \ -I$(topdir)/sub/sub \ -I. \ -Iesub \ -I./sub EOF cat_file esub/sub/Makefile <<"EOF" SUBLOC = esub/sub EXTRA_DIST = Makefile noinst_LIBRARIES = libfunc5.a libfunc5_a_SOURCES = func5.c func5.h include ../../build.mk EOF cat_file esub/esub/Makefile.am <<"EOF" EXTRA_DIST = Makefile.am noinst_LIBRARIES = libfunc6.a libfunc6_a_SOURCES = func6.c func6.h EOF msg Build the project run sh ./autogen.sh runq ./configure #run make esub/prog run make run ls run ./esub/prog msg Create distribution package run make dist run 'tar tzf actest-0.1.tar.gz | sort' msg Test installation run 'make install DESTDIR=`pwd`/inst' run ls run 'find inst | sort' msg Test the distribution package and separate build dir run mkdir -p test run cd test run tar xf ../actest-0.1.tar.gz run mkdir build run cd build runq ../actest-0.1/configure run make run ls run make esub/prog run ./esub/prog run cd ../.. 
msg Clean up run make maintainer-clean run ls msg Done skytools-3.2.6/lib/mk/temos/src/antimake4.temo0000644000000000000000000000267412166266754016165 0ustar . ./libtemo.sh || exit 1 test_start title Using Antimake with autoconf msg Autoconf setup cat_file autogen.sh <<"EOF" ../../std-autogen.sh ../../.. # fetch Antimake template from libusual cp ../../antimake.mk antimake.mk.in EOF cat_file configure.ac <<"EOF" AC_INIT([actest], [0.1]) AC_CONFIG_SRCDIR([prog.c]) AC_PREREQ([2.59]) AC_USUAL_INIT AC_USUAL_PROGRAM_CHECK AC_OUTPUT([antimake.mk]) EOF msg Here is the source we want to build: cat_file prog.c <<"EOF" #include #include int main(void) { printf("hello\n"); return 0; } EOF msg Antimake based Makefile cat_file Makefile <<"EOF" # the automake-style build description for 'prog' noinst_PROGRAMS = prog prog_SOURCES = prog.c EXTRA_DIST = Makefile $(MAINTAINERCLEANFILES) # clean configured files DISTCLEANFILES = config.status config.log antimake.mk # clean generated files MAINTAINERCLEANFILES = configure config.guess config.sub install-sh antimake.mk.in # launch Antimake include antimake.mk EOF msg Build the project run sh ./autogen.sh runq ./configure run make run ls run ./prog msg Create distribution package run make dist run 'tar tzf actest-0.1.tar.gz | sort' msg Test the distribution package and separate build dir run mkdir -p test run cd test run tar xf ../actest-0.1.tar.gz run mkdir build run cd build runq ../actest-0.1/configure run make run ls run ./prog run cd ../.. msg Clean up run make maintainer-clean run ls msg Done skytools-3.2.6/lib/mk/temos/src/libusual5.temo0000644000000000000000000000263112166266754016206 0ustar . ./libtemo.sh || exit 1 test_start title Use installed libusual with antimake. longmsg <<-MSG Install libusual and link against it. MSG title2 Build libusual usual_clone libusual run cd libusual runq ./autogen.sh runq './configure --disable-shared --prefix=`pwd`/../inst' runq make runq make install run cd .. 
title2 Build our own code msg Now we prepare our own code. msg First, this is the source file: cat_file prog.c <<"EOF" #include #include #include int main(void) { const char *data = "CECSFXX"; uint32_t crc; crc = calc_crc32(data, strlen(data), 0); printf("crc: %08x\n", crc); return 0; } EOF msg Here is corresponding Makefile: cat_file Makefile <<"EOF" # temo hacks to support local install, not needed otherwise PKG_CONFIG = PKG_CONFIG_PATH=$(CURDIR)/inst/lib/pkgconfig pkg-config # use pkg-config to get libusual info USUAL_CPPFLAGS = $(shell $(PKG_CONFIG) libusual --cflags) USUAL_LDFLAGS = $(shell $(PKG_CONFIG) libusual --libs-only-L) USUAL_LIBS = $(shell $(PKG_CONFIG) libusual --libs-only-l) # Generic Antimake bin_PROGRAMS = prog prog_SOURCES = prog.c prog_CPPFLAGS = $(USUAL_CPPFLAGS) prog_LDFLAGS = $(USUAL_LDFLAGS) prog_LDADD = $(USUAL_LIBS) # use installed Antimake ANTIMAKE = $(shell $(PKG_CONFIG) libusual --variable=antimake) include $(ANTIMAKE) EOF msg Build the project run make run ls run ./prog msg Done! skytools-3.2.6/lib/mk/temos/src/antimake2.temo0000644000000000000000000000460412166266754016156 0ustar . ./libtemo.sh || exit 1 test_start title Test Antimake SUBDIRS longmsg <<-"MSG" Antimake variable `SUBDIRS` list names of directories that the Make needs to recurse into. Each of them contains stand-alone Makefile that directs building in that subdirs. - Plus: you can call 'make' while being in subdir to build only local targets. - Minus: dependencies between subdirs do not work. MSG title2 Setup source tree msg Setup directories, install Antimake run mkdir -p lib1/sublib lib2 run cp ../../antimake.mk . 
msg Prepare sources cat_file api.h <<"EOF" void func1(void); void func2(void); void func3(void); EOF cat_file main.c <<"EOF" #include #include "api.h" int main(void) { func1(); func2(); func3(); printf("main\n"); return 0; } EOF cat_file lib1/func1.c <<"EOF" #include #include "api.h" void func1(void) { printf("func1\n"); } EOF cat_file lib1/sublib/func2.c <<"EOF" #include #include "api.h" void func2(void) { printf("func2\n"); } EOF cat_file lib2/func3.c <<"EOF" #include #include "api.h" void func3(void) { printf("func3\n"); } EOF msg Prepare Makefiles cat_file Makefile <<"EOF" PACKAGE_NAME = test-subdirs PACKAGE_VERSION = 1.0 SUBDIRS = lib1 lib2 bin_PROGRAMS = prog prog_SOURCES = main.c prog_LDADD = lib1/func1.a lib1/sublib/func2.a lib2/func3.a EXTRA_DIST = Makefile antimake.mk include antimake.mk EOF cat_file lib1/Makefile <<"EOF" SUBLOC = lib1 SUBDIRS = sublib noinst_LIBRARIES = func1.a func1_a_SOURCES = func1.c func1_a_CPPFLAGS = -I.. EXTRA_DIST = Makefile include ../antimake.mk EOF cat_file lib1/sublib/Makefile <<"EOF" SUBLOC = lib1/sublib noinst_LIBRARIES = func2.a func2_a_SOURCES = func2.c func2_a_CPPFLAGS = -I../.. EXTRA_DIST = Makefile include ../../antimake.mk EOF cat_file lib2/Makefile <<"EOF" SUBLOC = lib2 noinst_LIBRARIES = func3.a func3_a_SOURCES = func3.c func3_a_CPPFLAGS = -I$(top_srcdir) EXTRA_DIST = Makefile include ../antimake.mk EOF title2 Building msg Build the project run make run ls run ./prog msg We can now install it: run make install DESTDIR=./inst run ls ./inst/usr/local/bin msg Now we can create package that can be given to others. run make dist run ls run 'tar tzf test-subdirs-1.0.tar.gz | sort' msg Clean the tree run make clean run ls msg Test O= run mkdir -p build run make O=build run ls run ls build msg Done! skytools-3.2.6/lib/mk/temos/Makefile0000644000000000000000000000274412166266754014271 0ustar # # This Makefile does not use Antimake because it must work # even when Antimake is broken. 
# TEMOS = \ antimake1 antimake2 antimake3 antimake4 antimake5 antimake6 \ libusual1 libusual3 libusual4 libusual5 libusual6 \ OUT = $(addsuffix .txt,$(addprefix output/, $(TEMOS))) HTML = $(addsuffix .html,$(addprefix html/, $(TEMOS))) \ html/index.html html/antimake.html ExpFile = $(subst .temo,.txt,$(subst src/,expected/,$(1))) OutFile = $(subst .temo,.txt,$(subst src/,output/,$(1))) V ?= 0 ifeq ($(V),0) E = @echo Q = @ else E = @true Q = endif all: qtest .PHONY: all test ack clean html html: $(HTML) am: rm -f html/anti* make ftest: clean qtest qtest: $(OUT) @diff -urN expected output > regressions.diff || { \ less regressions.diff; \ echo "FAIL: Result does not match expected output"; \ exit 1; \ } $(Q) rm -f regressions.diff $(E) "All OK" #../antimake.mk ../../m4/usual.m4 output/%.txt: src/%.temo libtemo.sh @mkdir -p output @printf "$< ... " @bash $< > $@ && { cmp -s $@ $(call ExpFile,$<) && echo ok || echo failed; } \ || { echo "$< failed:" ; tail $(call OutFile,$<); exit 1; } html/%.html: output/%.txt @mkdir -p html asciidoc -o - $< | grep -v '^Last updated ' > $@ ack: cp output/*.txt expected/ clean: rm -rf tmp html output regressions.diff html/index.html: index.txt @mkdir -p html asciidoc -o - $< | grep -v '^Last updated ' > $@ html/antimake.html: ../antimake.txt @mkdir -p html asciidoc -a toc -o - $< | grep -v '^Last updated ' > $@ skytools-3.2.6/lib/mk/temos/index.txt0000644000000000000000000000251012166266754014470 0ustar = Libusual/Antimake Build Demos = Here are few demos showing how to use both Libusual and Antimake in various scenarios. If you wonder why there is so many of them, the reason is that these demos are also used as regressions tests for various functionality. (Thus, 'temos') They are ordered in ascending complexity, so pick first one if you want simplest overview. 
== Reference Documentation == - link:antimake.html[AntiMake documentation] == Using Antimake == - link:antimake1.html[antimake1] - Project without subdirs - link:antimake2.html[antimake2] - Recursive subdirectories (SUBDIRS) - link:antimake3.html[antimake3] - Non-recursive subdirectories (EMBED_SUBDIRS) - link:antimake4.html[antimake4] - Using Autoconf. - link:antimake5.html[antimake5] - Shared libraries with Autoconf. - link:antimake6.html[antimake5] - Subdir stress test. == Using libusual == - link:libusual1.html[libusual1] - Local libusual: Linking against libusual.a with plain Make - link:libusual2.html[libusual2] - Local libusual: Embedding libusual modules with/witout Antimake - link:libusual3.html[libusual3] - Local libusual: Using top-level autoconf with Antimake - link:libusual4.html[libusual4] - Installed libusual: Plain make - link:libusual5.html[libusual5] - Installed libusual: Antimake - link:libusual6.html[libusual6] - Installed libusual: Autoconf skytools-3.2.6/lib/mk/temos/expected/0000755000000000000000000000000012166266754014423 5ustar skytools-3.2.6/lib/mk/temos/expected/libusual6.txt0000644000000000000000000000657412166266754017106 0ustar = Use installed libusual with autoconf and antimake. = Install libusual and link against it. == Build libusual == --------------------------------- $ git clone git://github.com/libusual/libusual.git libusual Cloning into 'libusual'... done. $ cd libusual $ ./autogen.sh [...] $ ./configure --disable-shared --prefix=`pwd`/../inst [...] $ make [...] $ make install [...] $ cd .. --------------------------------- == Build our own code == Now we prepare our own code. 
First, this is the source file: .File: prog.c [source,c] ----------------------------------- #include #include #include int main(void) { const char *data = "CECSFXX"; uint32_t crc; crc = calc_crc32(data, strlen(data), 0); printf("crc: %08x\n", crc); return 0; } ----------------------------------- Autoconf setup .File: autogen.sh [source,shell] ----------------------------------- # use prepared autgen logic ../../std-autogen.sh ../../.. # fetch Antimake template from libusual cp ../../antimake.mk antimake.mk.in ----------------------------------- .File: extra.mk.in ----------------------------------- USUAL_ANTIMAKE = @USUAL_ANTIMAKE@ USUAL_CFLAGS = @USUAL_CFLAGS@ USUAL_LIBS = @USUAL_LIBS@ ----------------------------------- .File: configure.ac [source,autoconf] ----------------------------------- AC_INIT([achello], [0.1], [https://libusual.github.com]) AC_CONFIG_HEADER([config.h]) AC_CONFIG_SRCDIR([prog.c]) AC_PREREQ([2.59]) AC_USUAL_INIT AC_USUAL_PROGRAM_CHECK PKG_CHECK_MODULES(USUAL, libusual) _PKG_CONFIG([USUAL_ANTIMAKE], [variable=antimake], [libusual]) AC_SUBST([USUAL_ANTIMAKE]) AC_OUTPUT([antimake.mk extra.mk]) ----------------------------------- Here is the source that needs to be linked with libusual: .File: prog.c [source,c] ----------------------------------- #include #include #include int main(void) { const char *data = "CECSFXX"; uint32_t crc; crc = calc_crc32(data, strlen(data), 0); printf("crc: %08x\n", crc); return 0; } ----------------------------------- Antimake based Makefile .File: Makefile [source,makefile] ----------------------------------- # the automake-style build description for 'prog' noinst_PROGRAMS = prog prog_SOURCES = prog.c prog_CPPFLAGS = $(USUAL_CFLAGS) prog_LDADD = $(USUAL_LIBS) # clean configured files DISTCLEANFILES = config.status config.log extra.mk antimake.mk config.h # clean generated files MAINTAINERCLEANFILES = configure config.guess config.sub install-sh \ antimake.mk.in extra.mk.in config.h.in EXTRA_DIST = Makefile 
$(MAINTAINERCLEANFILES) # launch Antimake include extra.mk include antimake.mk ----------------------------------- Build the project --------------------------------- $ sh ./autogen.sh $ PKG_CONFIG_PATH=`pwd`/inst/lib/pkgconfig ./configure [...] $ make CC prog.c CCLD prog $ ls Makefile config.guess config.status extra.mk libusual antimake.mk config.h config.sub extra.mk.in prog antimake.mk.in config.h.in configure inst prog.c autogen.sh config.log configure.ac install-sh $ ./prog crc: 12345678 $ make am-show-distfiles Makefile antimake.mk.in config.guess config.h.in config.sub configure extra.mk.in install-sh prog.c $ make maintainer-clean CLEAN prog MAINTAINERCLEAN maintainer-clean $ ls Makefile autogen.sh configure.ac inst libusual prog.c --------------------------------- Done! skytools-3.2.6/lib/mk/temos/expected/antimake6.txt0000644000000000000000000002367112166266754017054 0ustar = Antimake stress-test = Autoconf setup .File: autogen.sh [source,shell] ----------------------------------- ../../std-autogen.sh ../../.. # fetch Antimake template from libusual make -f ../../antimake.mk show-config > build.mk.in echo 'include $(abs_top_srcdir)/antimake.mk' >> build.mk.in ln -sf ../../antimake.mk . ln -sf ../../amext-cxx.mk . 
----------------------------------- .File: configure.ac [source,autoconf] ----------------------------------- AC_INIT([actest], [0.1]) AC_CONFIG_SRCDIR([esub/prog.c]) AC_PREREQ([2.59]) AC_USUAL_INIT LT_INIT AC_USUAL_PROGRAM_CHECK AC_PROG_CXX AC_OUTPUT([build.mk]) ----------------------------------- Here are the source files: ------------- ./prog.c ./cpptest.cpp ./sub/func1.c ./sub/esub/func2,c ./sub/sub/func3.c ./esub/func4.c ./esub/esub/func5.c ./esub/sub/func6.c ------------- --------------------------------- $ mkdir -p sub/esub sub/sub esub/esub esub/sub --------------------------------- .File: esub/prog.c [source,c] ----------------------------------- #include "func1.h" #include "func2.h" #include "func3.h" #include "func4.h" #include "func5.h" #include "func6.h" #include int main(void) { printf("%s\n", __FILE__); func1(); func2(); func3(); func4(); func5(); func6(); return 0; } ----------------------------------- .File: cpptest.cpp ----------------------------------- #include using namespace std; int main(void) { cout << "Hello" << endl; return 0; } ----------------------------------- .File: sub/func1.c [source,c] ----------------------------------- #include #include "func1.h" void func1(void) { printf("%s\n", __FILE__); } ----------------------------------- .File: sub/func1.h [source,c] ----------------------------------- void func1(void); ----------------------------------- --------------------------------- $ sed s/1/2/ sub/func1.c > sub/esub/func2.c $ sed s/1/2/ sub/func1.h > sub/esub/func2.h $ sed s/1/3/ sub/func1.c > sub/sub/func3.c $ sed s/1/3/ sub/func1.h > sub/sub/func3.h $ sed s/1/4/ sub/func1.c > esub/func4.c $ sed s/1/4/ sub/func1.h > esub/func4.h $ sed s/1/5/ sub/func1.c > esub/sub/func5.c $ sed s/1/5/ sub/func1.h > esub/sub/func5.h $ sed s/1/6/ sub/func1.c > esub/esub/func6.c $ sed s/1/6/ sub/func1.h > esub/esub/func6.h --------------------------------- Now fill makefiles .File: Makefile [source,makefile] ----------------------------------- 
SUBDIRS = sub EMBED_SUBDIRS = esub AM_FEATURES = cxx override WFLAGS = -Wall EXTRA_DIST = Makefile antimake.mk amext-cxx.mk $(MAINTAINERCLEANFILES) # clean configured files DISTCLEANFILES = \ config.status \ config.log \ libtool # clean generated files MAINTAINERCLEANFILES = \ configure \ config.guess \ config.sub \ install-sh \ build.mk.in \ ltmain.sh noinst_PROGRAMS = cpptest cpptest_SOURCES = cpptest.cpp # launch Antimake include build.mk ----------------------------------- .File: sub/Makefile [source,makefile] ----------------------------------- SUBLOC = sub SUBDIRS = sub EMBED_SUBDIRS = esub noinst_LIBRARIES = libfunc1.a libfunc1_a_SOURCES = func1.c func1.h EXTRA_DIST = Makefile include ../build.mk ----------------------------------- .File: sub/sub/Makefile [source,makefile] ----------------------------------- SUBLOC = sub/sub EXTRA_DIST = Makefile noinst_LIBRARIES = libfunc3.a libfunc3_a_SOURCES = func3.c func3.h include ../../build.mk ----------------------------------- .File: sub/esub/Makefile.am ----------------------------------- noinst_LIBRARIES = libfunc2.a libfunc2_a_SOURCES = func2.c func2.h EXTRA_DIST = Makefile.am ----------------------------------- .File: esub/Makefile.am ----------------------------------- SUBDIRS = sub EMBED_SUBDIRS = esub EXTRA_DIST = Makefile.am noinst_LIBRARIES = libfunc4.a libfunc4_a_SOURCES = func4.c func4.h noinst_PROGRAMS = prog prog_SOURCES = prog.c prog_LDFLAGS = -L../sub -L../sub/esub -L. -Lsub prog_LDADD = \ -lfunc1 \ -lfunc2 \ $(topdir)/sub/sub/libfunc3.a \ -lfunc4 \ -lfunc5 \ esub/libfunc6.a prog_CFLAGS = -I../sub prog_CPPFLAGS = \ -I../sub/esub \ -I$(topdir)/sub/sub \ -I. 
\ -Iesub \ -I./sub ----------------------------------- .File: esub/sub/Makefile [source,makefile] ----------------------------------- SUBLOC = esub/sub EXTRA_DIST = Makefile noinst_LIBRARIES = libfunc5.a libfunc5_a_SOURCES = func5.c func5.h include ../../build.mk ----------------------------------- .File: esub/esub/Makefile.am ----------------------------------- EXTRA_DIST = Makefile.am noinst_LIBRARIES = libfunc6.a libfunc6_a_SOURCES = func6.c func6.h ----------------------------------- Build the project --------------------------------- $ sh ./autogen.sh $ ./configure [...] $ make --> sub --> sub/sub CC func3.c AR libfunc3.a RANLIB libfunc3.a <-- sub/sub CC func1.c AR libfunc1.a RANLIB libfunc1.a CC esub/func2.c AR esub/libfunc2.a RANLIB esub/libfunc2.a <-- sub --> esub/sub CC func5.c AR libfunc5.a RANLIB libfunc5.a <-- esub/sub CC esub/func4.c AR esub/libfunc4.a RANLIB esub/libfunc4.a CC esub/esub/func6.c AR esub/esub/libfunc6.a RANLIB esub/esub/libfunc6.a CXX cpptest.cpp CXXLD cpptest CC esub/prog.c CCLD esub/prog $ ls Makefile build.mk config.status cpptest libtool amext-cxx.mk build.mk.in config.sub cpptest.cpp ltmain.sh antimake.mk config.guess configure esub sub autogen.sh config.log configure.ac install-sh $ ./esub/prog esub/prog.c func1.c esub/func2.c func3.c esub/func4.c func5.c esub/esub/func6.c --------------------------------- Create distribution package --------------------------------- $ make dist CHECK dist-gzip MKDIR actest-0.1 COPY actest-0.1 PACK actest-0.1.tar.gz $ tar tzf actest-0.1.tar.gz | sort actest-0.1/ actest-0.1/Makefile actest-0.1/amext-cxx.mk actest-0.1/antimake.mk actest-0.1/build.mk.in actest-0.1/config.guess actest-0.1/config.sub actest-0.1/configure actest-0.1/cpptest.cpp actest-0.1/esub/ actest-0.1/esub/Makefile.am actest-0.1/esub/esub/ actest-0.1/esub/esub/Makefile.am actest-0.1/esub/esub/func6.c actest-0.1/esub/esub/func6.h actest-0.1/esub/func4.c actest-0.1/esub/func4.h actest-0.1/esub/prog.c actest-0.1/esub/sub/ 
actest-0.1/esub/sub/Makefile actest-0.1/esub/sub/func5.c actest-0.1/esub/sub/func5.h actest-0.1/install-sh actest-0.1/ltmain.sh actest-0.1/sub/ actest-0.1/sub/Makefile actest-0.1/sub/esub/ actest-0.1/sub/esub/Makefile.am actest-0.1/sub/esub/func2.c actest-0.1/sub/esub/func2.h actest-0.1/sub/func1.c actest-0.1/sub/func1.h actest-0.1/sub/sub/ actest-0.1/sub/sub/Makefile actest-0.1/sub/sub/func3.c actest-0.1/sub/sub/func3.h --------------------------------- Test installation --------------------------------- $ make install DESTDIR=`pwd`/inst --> sub --> sub/sub make[2]: Nothing to be done for `install'. <-- sub/sub <-- sub --> esub/sub make[1]: Nothing to be done for `install'. <-- esub/sub $ ls Makefile autogen.sh config.log configure.ac install-sh actest-0.1.tar.gz build.mk config.status cpptest libtool amext-cxx.mk build.mk.in config.sub cpptest.cpp ltmain.sh antimake.mk config.guess configure esub sub $ find inst | sort find: `inst': No such file or directory --------------------------------- Test the distribution package and separate build dir --------------------------------- $ mkdir -p test $ cd test $ tar xf ../actest-0.1.tar.gz $ mkdir build $ cd build $ ../actest-0.1/configure [...] $ make MKDIR Create sub --> sub MKDIR Create sub/sub --> sub/sub CC ../../../actest-0.1/sub/sub/func3.c AR libfunc3.a RANLIB libfunc3.a <-- sub/sub CC ../../actest-0.1/sub/func1.c AR libfunc1.a RANLIB libfunc1.a CC ../../actest-0.1/sub/esub/func2.c AR esub/libfunc2.a RANLIB esub/libfunc2.a <-- sub MKDIR Create esub/sub --> esub/sub CC ../../../actest-0.1/esub/sub/func5.c AR libfunc5.a RANLIB libfunc5.a <-- esub/sub CC ../actest-0.1/esub/func4.c AR esub/libfunc4.a RANLIB esub/libfunc4.a CC ../actest-0.1/esub/esub/func6.c AR esub/esub/libfunc6.a RANLIB esub/esub/libfunc6.a CXX ../actest-0.1/cpptest.cpp CXXLD cpptest CC ../actest-0.1/esub/prog.c CCLD esub/prog $ ls Makefile build.mk config.log config.status cpptest esub libtool sub $ make esub/prog make: `esub/prog' is up to date. 
$ ./esub/prog ../actest-0.1/esub/prog.c ../../actest-0.1/sub/func1.c ../../actest-0.1/sub/esub/func2.c ../../../actest-0.1/sub/sub/func3.c ../actest-0.1/esub/func4.c ../../../actest-0.1/esub/sub/func5.c ../actest-0.1/esub/esub/func6.c $ cd ../.. --------------------------------- Clean up --------------------------------- $ make maintainer-clean --> sub --> sub/sub CLEAN libfunc3.a <-- sub/sub CLEAN libfunc1.a CLEAN esub/libfunc2.a CLEAN clean <-- sub --> esub/sub CLEAN libfunc5.a <-- esub/sub CLEAN esub/libfunc4.a CLEAN esub/esub/libfunc6.a CLEAN cpptest CLEAN esub/prog CLEAN clean --> sub --> sub/sub CLEAN libfunc3.a <-- sub/sub CLEAN libfunc1.a CLEAN esub/libfunc2.a CLEAN clean --> sub/sub CLEAN libfunc3.a MAINTAINERCLEAN maintainer-clean <-- sub/sub MAINTAINERCLEAN maintainer-clean <-- sub --> esub/sub CLEAN libfunc5.a MAINTAINERCLEAN maintainer-clean <-- esub/sub MAINTAINERCLEAN maintainer-clean $ ls Makefile amext-cxx.mk autogen.sh configure.ac esub test actest-0.1.tar.gz antimake.mk build.mk cpptest.cpp sub --------------------------------- Done skytools-3.2.6/lib/mk/temos/expected/libusual5.txt0000644000000000000000000000335612166266754017100 0ustar = Use installed libusual with antimake. = Install libusual and link against it. == Build libusual == --------------------------------- $ git clone git://github.com/libusual/libusual.git libusual Cloning into 'libusual'... done. $ cd libusual $ ./autogen.sh [...] $ ./configure --disable-shared --prefix=`pwd`/../inst [...] $ make [...] $ make install [...] $ cd .. --------------------------------- == Build our own code == Now we prepare our own code. 
First, this is the source file: .File: prog.c [source,c] ----------------------------------- #include #include #include int main(void) { const char *data = "CECSFXX"; uint32_t crc; crc = calc_crc32(data, strlen(data), 0); printf("crc: %08x\n", crc); return 0; } ----------------------------------- Here is corresponding Makefile: .File: Makefile [source,makefile] ----------------------------------- # temo hacks to support local install, not needed otherwise PKG_CONFIG = PKG_CONFIG_PATH=$(CURDIR)/inst/lib/pkgconfig pkg-config # use pkg-config to get libusual info USUAL_CPPFLAGS = $(shell $(PKG_CONFIG) libusual --cflags) USUAL_LDFLAGS = $(shell $(PKG_CONFIG) libusual --libs-only-L) USUAL_LIBS = $(shell $(PKG_CONFIG) libusual --libs-only-l) # Generic Antimake bin_PROGRAMS = prog prog_SOURCES = prog.c prog_CPPFLAGS = $(USUAL_CPPFLAGS) prog_LDFLAGS = $(USUAL_LDFLAGS) prog_LDADD = $(USUAL_LIBS) # use installed Antimake ANTIMAKE = $(shell $(PKG_CONFIG) libusual --variable=antimake) include $(ANTIMAKE) ----------------------------------- Build the project --------------------------------- $ make CC prog.c CCLD prog $ ls Makefile inst libusual prog prog.c $ ./prog crc: 12345678 --------------------------------- Done! skytools-3.2.6/lib/mk/temos/expected/antimake5.txt0000644000000000000000000001001612166266754017040 0ustar = Shared libraries and autoconf = Autoconf setup .File: autogen.sh [source,shell] ----------------------------------- ../../std-autogen.sh ../../.. 
# fetch Antimake template from libusual cp ../../antimake.mk antimake.mk.in ----------------------------------- .File: configure.ac [source,autoconf] ----------------------------------- AC_INIT([actest], [0.1]) AC_CONFIG_SRCDIR([prog.c]) AC_PREREQ([2.59]) LT_INIT AC_USUAL_INIT AC_USUAL_PROGRAM_CHECK AC_OUTPUT([antimake.mk]) ----------------------------------- Here are the source files: .File: prog.c [source,c] ----------------------------------- void func1(void); int main(void) { func1(); return 0; } ----------------------------------- .File: func.c [source,c] ----------------------------------- #include void func1(void); void func1(void) { printf("hello from func1\n"); } ----------------------------------- Antimake based Makefile .File: Makefile [source,makefile] ----------------------------------- lib_LTLIBRARIES = libtemo.la libtemo_la_SOURCES = func.c libtemo_la_LDFLAGS = -version-info 3:0:2 bin_PROGRAMS = prog prog_SOURCES = prog.c prog_LDADD = libtemo.la # clean configured files DISTCLEANFILES = \ config.status \ config.log \ antimake.mk \ libtool # clean generated files MAINTAINERCLEANFILES = \ configure \ config.guess \ config.sub \ install-sh \ antimake.mk.in \ ltmain.sh EXTRA_DIST = \ Makefile \ $(MAINTAINERCLEANFILES) # launch Antimake include antimake.mk ----------------------------------- Build the project --------------------------------- $ sh ./autogen.sh $ ./configure [...] 
$ make CC prog.c CC func.c CCLD libtemo.la CCLD prog $ ls Makefile autogen.sh config.status configure.ac libtemo.la prog antimake.mk config.guess config.sub func.c libtool prog.c antimake.mk.in config.log configure install-sh ltmain.sh $ ./prog hello from func1 --------------------------------- Create distribution package --------------------------------- $ make dist CHECK dist-gzip MKDIR actest-0.1 COPY actest-0.1 PACK actest-0.1.tar.gz $ tar tzf actest-0.1.tar.gz | sort actest-0.1/ actest-0.1/Makefile actest-0.1/antimake.mk.in actest-0.1/config.guess actest-0.1/config.sub actest-0.1/configure actest-0.1/func.c actest-0.1/install-sh actest-0.1/ltmain.sh actest-0.1/prog.c --------------------------------- Test installation --------------------------------- $ make install DESTDIR=/tmp/test-inst INSTALL prog /tmp/test-inst/usr/local/bin INSTALL libtemo.la /tmp/test-inst/usr/local/lib libtool: install: warning: remember to run `libtool --finish /usr/local/lib' $ ls Makefile autogen.sh config.sub install-sh prog actest-0.1.tar.gz config.guess configure libtemo.la prog.c antimake.mk config.log configure.ac libtool antimake.mk.in config.status func.c ltmain.sh $ find /tmp/test-inst | sort /tmp/test-inst /tmp/test-inst/usr /tmp/test-inst/usr/local /tmp/test-inst/usr/local/bin /tmp/test-inst/usr/local/bin/prog /tmp/test-inst/usr/local/lib /tmp/test-inst/usr/local/lib/libtemo.a /tmp/test-inst/usr/local/lib/libtemo.la /tmp/test-inst/usr/local/lib/libtemo.so /tmp/test-inst/usr/local/lib/libtemo.so.1 /tmp/test-inst/usr/local/lib/libtemo.so.1.2.0 $ rm -rf /tmp/test-inst --------------------------------- Test the distribution package and separate build dir --------------------------------- $ mkdir -p test $ cd test $ tar xf ../actest-0.1.tar.gz $ mkdir build $ cd build $ ../actest-0.1/configure [...] 
$ make CC ../actest-0.1/prog.c CC ../actest-0.1/func.c CCLD libtemo.la CCLD prog $ ls Makefile antimake.mk config.log config.status libtemo.la libtool prog $ ./prog hello from func1 $ cd ../.. --------------------------------- Clean up --------------------------------- $ make maintainer-clean CLEAN prog CLEAN libtemo.la MAINTAINERCLEAN maintainer-clean $ ls Makefile actest-0.1.tar.gz autogen.sh configure.ac func.c prog.c test --------------------------------- Done skytools-3.2.6/lib/mk/temos/expected/libusual1.txt0000644000000000000000000000325312166266754017070 0ustar = Use libusual the simplest way = Simplest usage would be to configure and build libusual locally and point your projects CPPFLAGS and LDFLAGS there. That way you get access to not only code but also various autoconfigued symbols without any complexities in your project. == Build libusual == --------------------------------- $ git clone git://github.com/libusual/libusual.git lib Cloning into 'lib'... done. $ cd lib $ ./autogen.sh [...] $ ./configure --disable-shared --prefix=/opt [...] $ make [...] $ make install DESTDIR=`pwd`/../inst [...] $ cd .. --------------------------------- == Build our own code == Now we prepare our own code. 
First, this is the source file: .File: prog.c [source,c] ----------------------------------- #include #include #include int main(void) { const char *data = "CECSFXX"; uint32_t crc; crc = calc_crc32(data, strlen(data), 0); printf("crc: %08x\n", crc); return 0; } ----------------------------------- Here is corresponding Makefile: .File: Makefile [source,makefile] ----------------------------------- # here we describe our program SRCS = prog.c OBJS = $(SRCS:.c=.o) # here we link to libusual CPPFLAGS = -I./inst/opt/include LDFLAGS = -L./inst/opt/lib LIBS = -lusual CC = gcc CFLAGS = -O -g -Wall all: prog prog: $(OBJS) $(CC) $(CFLAGS) $(LDFLAGS) $(OBJS) $(LIBS) -o $@ ----------------------------------- Build the project --------------------------------- $ make gcc -O -g -Wall -I./inst/opt/include -c -o prog.o prog.c gcc -O -g -Wall -L./inst/opt/lib prog.o -lusual -o prog $ ls Makefile inst lib prog prog.c prog.o $ ./prog crc: 12345678 --------------------------------- Done! skytools-3.2.6/lib/mk/temos/expected/antimake3.txt0000644000000000000000000001405312166266754017043 0ustar = Test Antimake EMBED_SUBDIRS = Antimake variable `EMBED_SUBDIRS` list names of directories that contains Makefile fragmants that are to be embedded into current Makefile. - Plus: Proper dependencies, work well with parallel Make. - Minus: Cannot go into subdir and run make there. - Minus: Fragments are not stand-alone, so need some care when writing. == Intro to EMBED_SUBDIRS == To better understand what EMBED_SUBDIRS does, let\'s start with simple case - single Makefile that references files under subdir: --------------------------------- $ mkdir -p src $ cp ../../antimake.mk . 
--------------------------------- .File: Makefile [source,makefile] ----------------------------------- bin_PROGRAMS = src/myprog src_myprog_SOURCES = src/myprog.c include antimake.mk ----------------------------------- .File: src/myprog.c [source,c] ----------------------------------- #include int main(void) { printf("myprog\n"); return 0; } ----------------------------------- --------------------------------- $ make CC src/myprog.c CCLD src/myprog $ ./src/myprog myprog $ make clean CLEAN src/myprog --------------------------------- Now you can put the lines that reference files under `src/` also into `src` and include that from top-level Makefile: .File: src/Makefile.inc ----------------------------------- bin_PROGRAMS = src/myprog src_myprog_SOURCES = src/myprog.c ----------------------------------- .File: Makefile [source,makefile] ----------------------------------- include src/Makefile.inc include antimake.mk ----------------------------------- --------------------------------- $ make CC src/myprog.c CCLD src/myprog $ ./src/myprog myprog --------------------------------- This works but the problem is that although the Makefile is local, it still sees files from top-Makefile-level. So that is what `EMBED_SUBDIRS` helps with - it allow to use local filenames in Makefile fragment, and it converts them to top-level filenames when including. It knows only few type of variables it needs to convert: - target filenames in primares lists (*_PROGRAMS, *_LIBRARIES, etc) - target_SOURCES: foo_SOURCES -> sub_dir_foo_SOURCES with filename conversion - other target variables: `foo_*` -> `sub_dir_foo_*` without filename conversion - EXTRA_DIST, CLEANFILES, DISTCLEANFILES, MAINTAINERCLEANFILES Any other variables stay untouched, and obviously they can mess up top-level variables. So the included Makefile should be as clean as possible. 
== Setup source tree for EMBED_SUBDIRS == Setup directories, install Antimake --------------------------------- $ mkdir -p lib1/sublib lib2 $ cp ../../antimake.mk . --------------------------------- Prepare sources .File: main.c [source,c] ----------------------------------- #include void func1(void); void func2(void); void func3(void); int main(void) { func1(); func2(); func3(); printf("main\n"); return 0; } ----------------------------------- .File: lib1/func1.c [source,c] ----------------------------------- #include void func1(void) { printf("func1\n"); } ----------------------------------- .File: lib1/sublib/func2.c [source,c] ----------------------------------- #include void func2(void) { printf("func2\n"); } ----------------------------------- .File: lib2/func3.c [source,c] ----------------------------------- #include void func3(void) { printf("func3\n"); } ----------------------------------- Prepare Makefiles .File: Makefile [source,makefile] ----------------------------------- PACKAGE_NAME = test-subdirs PACKAGE_VERSION = 1.0 EMBED_SUBDIRS = lib1 lib1/sublib lib2 bin_PROGRAMS = prog prog_SOURCES = main.c prog_LDADD = lib1/func1.a lib1/sublib/func2.a lib2/func3.a EXTRA_DIST = Makefile antimake.mk include antimake.mk ----------------------------------- .File: lib1/Makefile.am ----------------------------------- noinst_LIBRARIES = func1.a func1_a_SOURCES = func1.c EXTRA_DIST = Makefile.am ----------------------------------- .File: lib1/sublib/Makefile.am ----------------------------------- noinst_LIBRARIES = func2.a func2_a_SOURCES = func2.c EXTRA_DIST = Makefile.am ----------------------------------- .File: lib2/Makefile.am ----------------------------------- noinst_LIBRARIES = func3.a func3_a_SOURCES = func3.c EXTRA_DIST = Makefile.am ----------------------------------- == Building == Build the project --------------------------------- $ make CC main.c CC lib1/func1.c AR lib1/func1.a RANLIB lib1/func1.a CC lib1/sublib/func2.c AR lib1/sublib/func2.a RANLIB 
lib1/sublib/func2.a CC lib2/func3.c AR lib2/func3.a RANLIB lib2/func3.a CCLD prog $ ls Makefile antimake.mk lib1 lib2 main.c prog src $ ./prog func1 func2 func3 main --------------------------------- We can now install it: --------------------------------- $ make install prefix=/opt DESTDIR=./inst INSTALL prog ./inst/opt/bin $ ls ./inst/opt/bin prog --------------------------------- Now we can create package that can be given to others. --------------------------------- $ make dist CHECK dist-gzip MKDIR test-subdirs-1.0 COPY test-subdirs-1.0 PACK test-subdirs-1.0.tar.gz $ ls Makefile inst lib2 prog test-subdirs-1.0.tar.gz antimake.mk lib1 main.c src $ tar tzf test-subdirs-1.0.tar.gz | sort test-subdirs-1.0/ test-subdirs-1.0/Makefile test-subdirs-1.0/antimake.mk test-subdirs-1.0/lib1/ test-subdirs-1.0/lib1/Makefile.am test-subdirs-1.0/lib1/func1.c test-subdirs-1.0/lib1/sublib/ test-subdirs-1.0/lib1/sublib/Makefile.am test-subdirs-1.0/lib1/sublib/func2.c test-subdirs-1.0/lib2/ test-subdirs-1.0/lib2/Makefile.am test-subdirs-1.0/lib2/func3.c test-subdirs-1.0/main.c --------------------------------- Clean the tree --------------------------------- $ make clean CLEAN prog CLEAN lib1/func1.a CLEAN lib1/sublib/func2.a CLEAN lib2/func3.a CLEAN clean $ ls Makefile antimake.mk inst lib1 lib2 main.c src test-subdirs-1.0.tar.gz --------------------------------- Done! skytools-3.2.6/lib/mk/temos/expected/antimake4.txt0000644000000000000000000000510312166266754017040 0ustar = Using Antimake with autoconf = Autoconf setup .File: autogen.sh [source,shell] ----------------------------------- ../../std-autogen.sh ../../.. 
# fetch Antimake template from libusual cp ../../antimake.mk antimake.mk.in ----------------------------------- .File: configure.ac [source,autoconf] ----------------------------------- AC_INIT([actest], [0.1]) AC_CONFIG_SRCDIR([prog.c]) AC_PREREQ([2.59]) AC_USUAL_INIT AC_USUAL_PROGRAM_CHECK AC_OUTPUT([antimake.mk]) ----------------------------------- Here is the source we want to build: .File: prog.c [source,c] ----------------------------------- #include #include int main(void) { printf("hello\n"); return 0; } ----------------------------------- Antimake based Makefile .File: Makefile [source,makefile] ----------------------------------- # the automake-style build description for 'prog' noinst_PROGRAMS = prog prog_SOURCES = prog.c EXTRA_DIST = Makefile $(MAINTAINERCLEANFILES) # clean configured files DISTCLEANFILES = config.status config.log antimake.mk # clean generated files MAINTAINERCLEANFILES = configure config.guess config.sub install-sh antimake.mk.in # launch Antimake include antimake.mk ----------------------------------- Build the project --------------------------------- $ sh ./autogen.sh $ ./configure [...] $ make CC prog.c CCLD prog $ ls Makefile autogen.sh config.status configure.ac prog.c antimake.mk config.guess config.sub install-sh antimake.mk.in config.log configure prog $ ./prog hello --------------------------------- Create distribution package --------------------------------- $ make dist CHECK dist-gzip MKDIR actest-0.1 COPY actest-0.1 PACK actest-0.1.tar.gz $ tar tzf actest-0.1.tar.gz | sort actest-0.1/ actest-0.1/Makefile actest-0.1/antimake.mk.in actest-0.1/config.guess actest-0.1/config.sub actest-0.1/configure actest-0.1/install-sh actest-0.1/prog.c --------------------------------- Test the distribution package and separate build dir --------------------------------- $ mkdir -p test $ cd test $ tar xf ../actest-0.1.tar.gz $ mkdir build $ cd build $ ../actest-0.1/configure [...] 
$ make CC ../actest-0.1/prog.c CCLD prog $ ls Makefile antimake.mk config.log config.status prog $ ./prog hello $ cd ../.. --------------------------------- Clean up --------------------------------- $ make maintainer-clean CLEAN prog MAINTAINERCLEAN maintainer-clean $ ls Makefile actest-0.1.tar.gz autogen.sh configure.ac prog.c test --------------------------------- Done skytools-3.2.6/lib/mk/temos/expected/libusual4.txt0000644000000000000000000000773612166266754017105 0ustar = Use installed libusual = Install libusual and link against it. == Build libusual == --------------------------------- $ git clone git://github.com/libusual/libusual.git libusual Cloning into 'libusual'... done. $ cd libusual $ ./autogen.sh [...] $ ./configure --disable-static --prefix=`pwd`/../inst [...] $ make [...] $ make install [...] $ cd .. $ find inst | sort inst inst/include inst/include/usual inst/include/usual/aatree.h inst/include/usual/base.h inst/include/usual/base_win32.h inst/include/usual/bits.h inst/include/usual/cbtree.h inst/include/usual/cfparser.h inst/include/usual/config.h inst/include/usual/config_msvc.h inst/include/usual/crypto inst/include/usual/crypto/digest.h inst/include/usual/crypto/hmac.h inst/include/usual/crypto/keccak.h inst/include/usual/crypto/md5.h inst/include/usual/crypto/sha1.h inst/include/usual/crypto/sha256.h inst/include/usual/crypto/sha512.h inst/include/usual/ctype.h inst/include/usual/cxalloc.h inst/include/usual/cxextra.h inst/include/usual/daemon.h inst/include/usual/endian.h inst/include/usual/err.h inst/include/usual/event.h inst/include/usual/fileutil.h inst/include/usual/fnmatch.h inst/include/usual/getopt.h inst/include/usual/hashing inst/include/usual/hashing/crc32.h inst/include/usual/hashing/lookup3.h inst/include/usual/hashing/siphash.h inst/include/usual/hashtab-impl.h inst/include/usual/heap.h inst/include/usual/list.h inst/include/usual/logging.h inst/include/usual/mbuf.h inst/include/usual/mdict.h inst/include/usual/mempool.h 
inst/include/usual/misc.h inst/include/usual/netdb.h inst/include/usual/pgutil.h inst/include/usual/pgutil_kwlookup.h inst/include/usual/pthread.h inst/include/usual/regex.h inst/include/usual/safeio.h inst/include/usual/shlist.h inst/include/usual/signal.h inst/include/usual/slab.h inst/include/usual/socket.h inst/include/usual/socket_win32.h inst/include/usual/statlist.h inst/include/usual/string.h inst/include/usual/strpool.h inst/include/usual/time.h inst/include/usual/utf8.h inst/include/usual/wchar.h inst/lib inst/lib/libusual.a inst/lib/pkgconfig inst/lib/pkgconfig/libusual.pc inst/share inst/share/aclocal inst/share/aclocal/antimake.m4 inst/share/aclocal/usual.m4 inst/share/libusual inst/share/libusual/amext-cxx.mk inst/share/libusual/amext-libusual.mk inst/share/libusual/amext-modes.mk inst/share/libusual/amext-msvc.mk inst/share/libusual/antimake.mk inst/share/libusual/find_modules.sh inst/share/libusual/std-autogen.sh --------------------------------- == Build our own code == Now we prepare our own code. 
First, this is the source file: .File: prog.c [source,c] ----------------------------------- #include #include #include int main(void) { const char *data = "CECSFXX"; uint32_t crc; crc = calc_crc32(data, strlen(data), 0); printf("crc: %08x\n", crc); return 0; } ----------------------------------- Here is corresponding Makefile: .File: Makefile [source,makefile] ----------------------------------- CC = gcc CFLAGS = -O -g -Wall # here we describe our program SRCS = prog.c OBJS = $(SRCS:.c=.o) # put libusual flags to proper place CPPFLAGS = $(USUAL_CPPFLAGS) LIBS = $(USUAL_LIBS) # use pkg-config to get libusual info USUAL_CPPFLAGS = $(shell $(PKG_CONFIG) --cflags libusual) USUAL_LIBS = $(shell $(PKG_CONFIG) --libs libusual) # temo hacks to support local install, not needed otherwise PKG_CONFIG := PKG_CONFIG_PATH=$(CURDIR)/inst/lib/pkgconfig pkg-config CPPFLAGS := $(subst $(CURDIR)/libusual/../,./,$(CPPFLAGS)) LIBS := $(subst $(CURDIR)/libusual/../,./,$(LIBS)) all: prog %.o: %.c $(CC) $(CFLAGS) $(CPPFLAGS) -c -o $@ $< prog: $(OBJS) $(CC) $(CFLAGS) $(LDFLAGS) $(OBJS) $(LIBS) -o $@ ----------------------------------- Build the project --------------------------------- $ make gcc -O -g -Wall -I./inst/include -c -o prog.o prog.c gcc -O -g -Wall prog.o -L./inst/lib -lusual -o prog $ ls Makefile inst libusual prog prog.c prog.o $ LD_LIBRARY_PATH=./inst/lib ./prog crc: 12345678 --------------------------------- Done! skytools-3.2.6/lib/mk/temos/expected/libusual3.txt0000644000000000000000000000501212166266754017065 0ustar = Using Autoconf and embedded libusual = == Fetch libusual == Here we close libusual repo, but do not configure nor build it. --------------------------------- $ git clone git://github.com/libusual/libusual.git lib Cloning into 'lib'... done. 
--------------------------------- Autoconf setup .File: autogen.sh [source,shell] ----------------------------------- # use prepared autgen logic ./lib/mk/std-autogen.sh ./lib # fetch Antimake template from libusual cp lib/mk/antimake.mk antimake.mk.in ----------------------------------- .File: configure.ac [source,autoconf] ----------------------------------- AC_INIT([achello], [0.1], [https://libusual.github.com]) AC_CONFIG_SRCDIR([prog.c]) AC_CONFIG_HEADER([lib/usual/config.h]) AC_PREREQ([2.59]) AC_USUAL_PORT_CHECK AC_USUAL_PROGRAM_CHECK AC_USUAL_HEADER_CHECK AC_USUAL_TYPE_CHECK AC_USUAL_FUNCTION_CHECK AC_OUTPUT([antimake.mk]) ----------------------------------- Here is the source that needs to be linked with libusual: .File: prog.c [source,c] ----------------------------------- #include #include #include int main(void) { const char *data = "CECSFXX"; uint32_t crc; crc = calc_crc32(data, strlen(data), 0); printf("crc: %08x\n", crc); return 0; } ----------------------------------- Antimake based Makefile .File: Makefile [source,makefile] ----------------------------------- # the automake-style build description for 'prog' noinst_PROGRAMS = prog prog_SOURCES = prog.c # location of configured libusual USUAL_DIR = lib # mention that 'prog' wants embedded libusual prog_EMBED_LIBUSUAL = 1 AM_FEATURES = libusual # clean configured files DISTCLEANFILES = config.status config.log \ antimake.mk $(USUAL_DIR)/usual/config.h # clean generated files MAINTAINERCLEANFILES = configure config.guess config.sub install-sh \ antimake.mk.in $(USUAL_DIR)/usual/config.h.in # launch Antimake include $(USUAL_DIR)/mk/antimake.mk ----------------------------------- Build the project --------------------------------- $ sh ./autogen.sh $ ./configure [...] 
$ make CC prog.c CC lib/usual/hashing/crc32.c CC lib/usual/base.c CCLD prog $ ls Makefile autogen.sh config.status configure.ac prog antimake.mk config.guess config.sub install-sh prog.c antimake.mk.in config.log configure lib $ ./prog crc: 12345678 $ make maintainer-clean CLEAN prog MAINTAINERCLEAN maintainer-clean $ ls Makefile autogen.sh configure.ac lib prog.c --------------------------------- Done skytools-3.2.6/lib/mk/temos/expected/antimake1.txt0000644000000000000000000000453712166266754017047 0ustar = Test Antimake = == Simplest usage == Here we avoid use of any autotools. First, this is the source file: .File: hello.c [source,c] ----------------------------------- #include int main(void) { printf("Hello, world\n"); return 0; } ----------------------------------- Here is corresponding Makefile: .File: Makefile [source,makefile] ----------------------------------- # This is target list - it's name describes target type # and how it is installed, it's value target files to be built. # bin - the targets will be installed under $(bindir) # PROGRAMS - the target is executable built from many sources bin_PROGRAMS = hello # The target 'hello'-s source file list. hello_SOURCES = hello.c # Run Antimake include antimake.mk ----------------------------------- Also install Antimake and we are ready to build: --------------------------------- $ cp ../../antimake.mk . 
$ ls Makefile antimake.mk hello.c --------------------------------- Build the project --------------------------------- $ make CC hello.c CCLD hello $ ls Makefile antimake.mk hello hello.c $ ./hello Hello, world --------------------------------- We can even install it already: --------------------------------- $ make install prefix=/opt DESTDIR=./inst INSTALL hello ./inst/opt/bin $ ls ./inst/opt/bin hello --------------------------------- For creating source package, we need to provide additional info: .File: Makefile [source,makefile] ----------------------------------- # Package name and version for tarball filename PACKAGE_NAME = myhello PACKAGE_VERSION = 1.0 # Non-source files to put into tarball EXTRA_DIST = Makefile antimake.mk bin_PROGRAMS = hello hello_SOURCES = hello.c include antimake.mk ----------------------------------- Now we can create package that can be given to others. --------------------------------- $ make dist CHECK dist-gzip MKDIR myhello-1.0 COPY myhello-1.0 PACK myhello-1.0.tar.gz $ ls Makefile antimake.mk hello hello.c inst myhello-1.0.tar.gz $ tar tzf myhello-1.0.tar.gz | sort myhello-1.0/ myhello-1.0/Makefile myhello-1.0/antimake.mk myhello-1.0/hello.c --------------------------------- Clean the tree --------------------------------- $ make clean CLEAN hello $ ls Makefile antimake.mk hello.c inst myhello-1.0.tar.gz --------------------------------- Done! skytools-3.2.6/lib/mk/temos/expected/antimake2.txt0000644000000000000000000001257712166266754017053 0ustar = Test Antimake SUBDIRS = Antimake variable `SUBDIRS` list names of directories that the Make needs to recurse into. Each of them contains stand-alone Makefile that directs building in that subdirs. - Plus: you can call 'make' while being in subdir to build only local targets. - Minus: dependencies between subdirs do not work. == Setup source tree == Setup directories, install Antimake --------------------------------- $ mkdir -p lib1/sublib lib2 $ cp ../../antimake.mk . 
--------------------------------- Prepare sources .File: api.h [source,c] ----------------------------------- void func1(void); void func2(void); void func3(void); ----------------------------------- .File: main.c [source,c] ----------------------------------- #include #include "api.h" int main(void) { func1(); func2(); func3(); printf("main\n"); return 0; } ----------------------------------- .File: lib1/func1.c [source,c] ----------------------------------- #include #include "api.h" void func1(void) { printf("func1\n"); } ----------------------------------- .File: lib1/sublib/func2.c [source,c] ----------------------------------- #include #include "api.h" void func2(void) { printf("func2\n"); } ----------------------------------- .File: lib2/func3.c [source,c] ----------------------------------- #include #include "api.h" void func3(void) { printf("func3\n"); } ----------------------------------- Prepare Makefiles .File: Makefile [source,makefile] ----------------------------------- PACKAGE_NAME = test-subdirs PACKAGE_VERSION = 1.0 SUBDIRS = lib1 lib2 bin_PROGRAMS = prog prog_SOURCES = main.c prog_LDADD = lib1/func1.a lib1/sublib/func2.a lib2/func3.a EXTRA_DIST = Makefile antimake.mk include antimake.mk ----------------------------------- .File: lib1/Makefile [source,makefile] ----------------------------------- SUBLOC = lib1 SUBDIRS = sublib noinst_LIBRARIES = func1.a func1_a_SOURCES = func1.c func1_a_CPPFLAGS = -I.. EXTRA_DIST = Makefile include ../antimake.mk ----------------------------------- .File: lib1/sublib/Makefile [source,makefile] ----------------------------------- SUBLOC = lib1/sublib noinst_LIBRARIES = func2.a func2_a_SOURCES = func2.c func2_a_CPPFLAGS = -I../.. 
EXTRA_DIST = Makefile include ../../antimake.mk ----------------------------------- .File: lib2/Makefile [source,makefile] ----------------------------------- SUBLOC = lib2 noinst_LIBRARIES = func3.a func3_a_SOURCES = func3.c func3_a_CPPFLAGS = -I$(top_srcdir) EXTRA_DIST = Makefile include ../antimake.mk ----------------------------------- == Building == Build the project --------------------------------- $ make --> lib1 --> lib1/sublib CC func2.c AR func2.a RANLIB func2.a <-- lib1/sublib CC func1.c AR func1.a RANLIB func1.a <-- lib1 --> lib2 CC func3.c AR func3.a RANLIB func3.a <-- lib2 CC main.c CCLD prog $ ls Makefile antimake.mk api.h lib1 lib2 main.c prog $ ./prog func1 func2 func3 main --------------------------------- We can now install it: --------------------------------- $ make install DESTDIR=./inst --> lib1 --> lib1/sublib make[2]: Nothing to be done for `install'. <-- lib1/sublib <-- lib1 --> lib2 make[1]: Nothing to be done for `install'. <-- lib2 INSTALL prog ./inst/usr/local/bin $ ls ./inst/usr/local/bin prog --------------------------------- Now we can create package that can be given to others. 
--------------------------------- $ make dist CHECK dist-gzip MKDIR test-subdirs-1.0 COPY test-subdirs-1.0 PACK test-subdirs-1.0.tar.gz $ ls Makefile api.h lib1 main.c test-subdirs-1.0.tar.gz antimake.mk inst lib2 prog $ tar tzf test-subdirs-1.0.tar.gz | sort test-subdirs-1.0/ test-subdirs-1.0/Makefile test-subdirs-1.0/antimake.mk test-subdirs-1.0/lib1/ test-subdirs-1.0/lib1/Makefile test-subdirs-1.0/lib1/func1.c test-subdirs-1.0/lib1/sublib/ test-subdirs-1.0/lib1/sublib/Makefile test-subdirs-1.0/lib1/sublib/func2.c test-subdirs-1.0/lib2/ test-subdirs-1.0/lib2/Makefile test-subdirs-1.0/lib2/func3.c test-subdirs-1.0/main.c --------------------------------- Clean the tree --------------------------------- $ make clean --> lib1 --> lib1/sublib CLEAN func2.a <-- lib1/sublib CLEAN func1.a <-- lib1 --> lib2 CLEAN func3.a <-- lib2 CLEAN prog $ ls Makefile antimake.mk api.h inst lib1 lib2 main.c test-subdirs-1.0.tar.gz --------------------------------- Test O= --------------------------------- $ mkdir -p build $ make O=build MKDIR Create lib1 --> lib1 MKDIR Create lib1/sublib --> lib1/sublib CC ../../../lib1/sublib/func2.c AR func2.a RANLIB func2.a <-- lib1/sublib CC ../../lib1/func1.c AR func1.a RANLIB func1.a <-- lib1 MKDIR Create lib2 --> lib2 CC ../../lib2/func3.c AR func3.a RANLIB func3.a <-- lib2 CC ../main.c CCLD prog $ ls Makefile api.h inst lib2 test-subdirs-1.0.tar.gz antimake.mk build lib1 main.c $ ls build Makefile antimake.mk lib1 lib2 prog --------------------------------- Done! skytools-3.2.6/lib/mk/antimake.mk0000755000000000000000000012140612166266754013624 0ustar #! /usr/bin/make -f # # antimake.mk - automake syntax with GNU Make # # Copyright (c) 2011 Marko Kreen # # Permission to use, copy, modify, and/or distribute this software for any # purpose with or without fee is hereby granted, provided that the above # copyright notice and this permission notice appear in all copies. 
# # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. # # Goals: # - Clean user Makefiles, by using automake syntax # - Clean output during build # - Optional ties with `autoconf` and `libtool` # - Automatic dependency tracking # - Avoid separate build step for Makefiles # - No extra tools needed except GNU Make # Usage without autoconf: # - copy antimake.mk into source dir, then: include antimake.mk # - copy/link antimake.mk into PATH, then: include $(shell antimake.mk) # # Usage with autoconf: # - Copy to antimake.mk.in at top dir, then process with autoconf # to antimake.mk and include that one in Makefiles. # # - Have config.mak.in that also includes antimake.mk. # Suggestion: the separate file should include antimake.mk # using $(abs_top_srcdir) to support separate build dir. # # - Include config and antimake.mk separately in user Makefiles ## ## Startup hacks ## # detect GNU make version, confuse others $(eval GNUMAKE380=1) GNUMAKE381=$(or ,$(GNUMAKE380)) define GNUMAKE382 = $(GNUMAKE381) endef # give error of too old ifeq ($(GNUMAKE381),) $(error GNU Make 3.81+ required) endif # extra targets if this file is executed directly ifeq ($(words $(MAKEFILE_LIST)), 1) .PHONY: show-location show-config # default: print location. For "include $(shell antimake.mk)"-style usage. 
show-location: @echo $(MAKEFILE_LIST) # show autoconfigurable variables show-config: @grep '@[^ ]*@$$' $(MAKEFILE_LIST) endif ## ## Allow this file to be processed through autoconf ## # # to extract autoconfigurable values: # $ grep '@[^ ]*@$' antimake.mk > config.mk.in # $ antimake.mk show-config > config.mk.in # ifneq ($(filter-out @%,@PACKAGE_NAME@),) PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PORTNAME = @PORTNAME@ EXEEXT = @EXEEXT@ HAVE_CC_DEPFLAG = @HAVE_CC_DEPFLAG@ # C language CC = @CC@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CFLAGS = @CFLAGS@ DEFS = @DEFS@ WFLAGS = @WFLAGS@ # linking LD = @LD@ LDFLAGS = @LDFLAGS@ LIBS = @LIBS@ # static and shared libs AR = @AR@ ARFLAGS = @ARFLAGS@ RANLIB = @RANLIB@ LIBTOOL = @LIBTOOL@ # other tools SHELL = @SHELL@ INSTALL = @INSTALL@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_DATA = @INSTALL_DATA@ MKDIR_P = @MKDIR_P@ SED = @SED@ AWK = @AWK@ GREP = @GREP@ EGREP = @EGREP@ STRIP = @STRIP@ # install locations prefix = @prefix@ exec_prefix = @exec_prefix@ bindir = @bindir@ includedir = @includedir@ sbindir = @sbindir@ libexecdir = @libexecdir@ datarootdir = @datarootdir@ datadir = @datadir@ sysconfdir = @sysconfdir@ docdir = @docdir@ mandir = @mandir@ libdir = @libdir@ localedir = @localedir@ pkgdatadir = @pkgdatadir@ pkgconfigdir = @pkgconfigdir@ aclocaldir = @aclocaldir@ # autoconf values for top dir abs_top_srcdir ?= @abs_top_srcdir@ abs_top_builddir ?= @abs_top_builddir@ nosub_top_srcdir ?= @top_srcdir@ nosub_top_builddir ?= @top_builddir@ endif # end of @xx@ values ## ## In case of missing autoconf values, provide sane defaults ## PACKAGE_NAME ?= package PACKAGE_TARNAME ?= $(PACKAGE_NAME) PACKAGE_VERSION ?= 0.0 PACKAGE_STRING ?= $(PACKAGE_NAME) $(PACKAGE_VERSION) PACKAGE_URL ?= PACKAGE_BUGREPORT ?= PORTNAME ?= unix EXEEXT ?= 
HAVE_CC_DEPFLAG ?= yes # C language CC ?= cc CPP ?= cpp CPPFLAGS ?= CFLAGS ?= -O -g DEFS ?= # warning flags are keps separately to allow easy override WFLAGS ?= -Wall # add them to main flags now CFLAGS += $(WFLAGS) # linking LD ?= ld LDFLAGS ?= LIBS ?= # static and shared libs LIBTOOL ?= libtool AR ?= ar ARFLAGS ?= rcs ifeq ($(ARFLAGS),rv) ARFLAGS = rcs endif RANLIB ?= ranlib # other tools SHELL ?= /bin/sh INSTALL ?= install INSTALL_PROGRAM ?= $(INSTALL) INSTALL_SCRIPT ?= $(INSTALL) INSTALL_DATA ?= $(INSTALL) MKDIR_P ?= mkdir -p SED ?= sed AWK ?= awk GREP ?= grep EGREP ?= grep -E STRIP ?= strip # install locations prefix ?= /usr/local exec_prefix ?= ${prefix} bindir ?= ${exec_prefix}/bin includedir ?= ${prefix}/include sbindir ?= ${exec_prefix}/sbin libexecdir ?= ${exec_prefix}/libexec datarootdir ?= ${prefix}/share datadir ?= ${datarootdir} sysconfdir ?= ${prefix}/etc docdir ?= ${datarootdir}/doc/${PACKAGE_TARNAME} mandir ?= ${datarootdir}/man libdir ?= ${exec_prefix}/lib localedir ?= ${datarootdir}/locale pkgdatadir ?= ${datarootdir}/${PACKAGE_TARNAME} pkgconfigdir ?= ${libdir}/pkgconfig aclocaldir ?= ${datarootdir}/aclocal # autoconf values for top dir abs_top_srcdir ?= $(CURDIR) abs_top_builddir ?= $(CURDIR) # make sure nosub vals are not empty ifeq ($(nosub_top_builddir),) nosub_top_builddir = . endif ifeq ($(nosub_top_srcdir),) nosub_top_srcdir = . endif ## ## Variables for user makefiles ## # current subdirectory location from top dir (foo/bar) SUBLOC ?= . # subdirectories in current directory SUBDIRS ?= # extra files for clean targets CLEANFILES ?= DISTCLEANFILES ?= MAINTAINERCLEANFILES ?= # Additional flags for Makefile use, to avoid need # to touch flags coming from autoconf/cmdline AM_DEFS ?= AM_CPPFLAGS ?= AM_CFLAGS ?= AM_LDFLAGS ?= AM_LIBTOOLFLAGS ?= AM_MAKEFLAGS ?= AM_LIBS ?= # libusual sources, for embedded usage USUAL_DIR ?= . 
# V=1 -> verbose build V ?= 0 # turn on function tracing AM_TRACE ?= # default formats for 'dist' AM_DIST_DEFAULT ?= gzip ## ## Non-user-serviceable area ## # Hacking: # # - Uppercase names are simple (late) variables, lowercase names - targets, # mixedcase - functions that need to be $(call)-ed. # # - Minimal amount of shell should be used here. # # - Minimal amount of := and $(eval) # # - It's useful to indent the expressions for easier understanding. # Later the indendation needs to be removed, as whitespace is significant for Make. # Several functions must not add any extra whitespace. # # GNU Make features in new versions: # # 3.80 - 2002-10-03: base version. $(eval) $(value) $(MAKEFILE_LIST) $(.VARIABLES) $(call fixes) # 3.81 - 2006-04-01: $(or), $(and), $(lastword), $(abspath), $(realpath), $(info), $(flavor) # 3.82 - 2010-07-28: private, undefine, define var := # # This file should use only features from 3.80 ## ## command helpers ## CCLD ?= $(CC) COMPILE ?= $(CC) $(AM_DEFS) $(DEFS) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LINK ?= $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_AR ?= $(AR) $(ARFLAGS) LIBTOOLCMD ?= $(LIBTOOL) $(LIBTOOLQ) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) RM = rm -f ## ## Internals ## # varables that can be set per-target with target_VAR # they appear as AM_foo. 
[Not supported: COMPILE] AM_TARGET_VARIABLES += CFLAGS CPPFLAGS LDFLAGS LIBTOOLFLAGS DEFS LIBS # list of language (rather compiler) names AM_LANGUAGES += C AM_BIG_PRIMARIES += LIBRARIES LTLIBRARIES PROGRAMS AM_SMALL_PRIMARIES += HEADERS SCRIPTS DATA MANS # list of destinations per primary AM_DESTINATIONS += bin lib libexec sbin \ data doc include locale man sysconf \ pkgdata pkgconfig aclocal \ noinst EXTRA # primaries where 'dist' is default AM_DIST_PRIMARIES += HEADERS AM_PRIMARIES = $(AM_BIG_PRIMARIES) $(AM_SMALL_PRIMARIES) # distclean does rm -rf on that OBJDIR = .objs # extension for objects OBJEXT = .o # extension for static libraries LIBEXT = .a # files that need to be converted to objects AM_SRCEXTS = $(foreach lang,$(AM_LANGUAGES),$(AM_LANG_$(lang)_SRCEXTS)) # target types - big/small: with/without objects # list of flags, 'noinst' is taken as dest, 'base' is always default AM_FLAGS = base nobase dist nodist ## configure non-defult target params AM_PROGRAMS_InstFunc = ProgInstall AM_LTLIBRARIES_InstFunc = LTLibInstall AM_LTLIBRARIES_OBJEXT = .lo AM_SCRIPTS_InstFunc = ScriptInstall AM_MANS_InstFunc = ManInstall # files to distribute am_DISTFILES := am_FINAL_DISTFILES = $(sort $(am_DISTFILES)) AM_DIST_BASE = $(PACKAGE_TARNAME)-$(PACKAGE_VERSION) AM_ALL_TARGETS = ## ## Make dependencies work ## HAVE_CC_DEPFLAG ?= yes ifeq ($(HAVE_CC_DEPFLAG),yes) OBJDEPS = -MD -MP -MT $@ -MF $@.d endif ## ## Quiet by default, 'make V=1' shows commands ## # 1-dir MkDir = $(MKDIR_P) $(1) # 1-fmt, 2-args Printf = printf $(1) $(2) CTX ?= ifeq ($(V), 0) E = @$(call Printf,"%-4s %-8s %s\n","$(CTX)") Q = @ LIBTOOLQ = --silent MAKEFLAGS += --no-print-directory else E = @true Q = LIBTOOLQ = --silent endif ## ## libtool activation ## # libtool activates when detects %.lo / %.la pattern LTCOMPILE = $(if $(filter %.lo,$@),$(LIBTOOLCMD) --mode=compile) LTLINK = $(if $(filter %.la %.lo,$^),$(LIBTOOLCMD) --mode=link) LTCLEAN = $(LIBTOOLCMD) --mode=clean ## ## Default setup for C ## 
AM_LANG_C_SRCEXTS = .c define AM_LANG_C_COMPILE $(E) "CC" $< $(Q) $(LTCOMPILE) $(COMPILE) $(OBJDEPS) -c -o $@ $< endef define AM_LANG_C_LINK $(E) "CCLD" $@ $(Q) $(LTLINK) $(LINK) $^ $(AM_LIBS) $(LIBS) $(AM_LT_RPATH) endef ## ## Various other shortcuts ## define ar_lib $(E) "AR" $@ $(Q) $(AM_AR) $@ $^ $(E) "RANLIB" $@ $(Q) $(RANLIB) $@ endef # 1 - dir define ProgInstall $(E) "INSTALL" "$< $(1)" $(Q) $(call MkDir,$(1)) $(Q) $(INSTALL_PROGRAM) $< $(1) endef # 1 - dir define ScriptInstall $(E) "INSTALL" "$< $(1)" $(Q) $(call MkDir,$(1)) $(Q) $(INSTALL_SCRIPT) $< $(1) endef # 1 - dir define DataInstall $(E) "INSTALL" "$< $(1)" $(Q) $(call MkDir,$(1)) $(Q) $(INSTALL_DATA) $< $(1) endef # 1 - dir, add manX subdir ManInstall = $(call DataInstall,$(1)/man$(call LastWord,$(subst ., ,$<))) # 1 - dir define LTLibInstall $(E) "INSTALL" "$< $(1)" $(Q) $(call MkDir,$(1)) $(Q) $(LIBTOOLCMD) --mode=install $(INSTALL) $< $(1) endef ## ## Create .srcext -> .obj mapping for a language ## # 1-tgt, 2-name, 3-srcext define LangObjTarget $(trace3) $$(OBJDIR)/$(1)/%$(OBJEXT) $$(OBJDIR)/$(1)/%.lo: %$(3) @$$(call MkDir,$$(dir $$@)) $$(AM_LANG_$(2)_COMPILE) endef # 1=tgt, 2=name define LangSetup $(trace2) $(foreach ext,$(AM_LANG_$(2)_SRCEXTS),$(call LangObjTarget,$(1),$(2),$(ext))$(NewLine)) endef ## ## Utility functions ## # for function debugging, put them at the start of body ifdef AM_TRACE trace1=$(warning $0('$1')) trace2=$(warning $0('$1','$2')) trace3=$(warning $0('$1','$2','$3')) trace4=$(warning $0('$1','$2','$3','$4')) trace5=$(warning $0('$1','$2','$3','$4','$5')) trace6=$(warning $0('$1','$2','$3','$4','$5','$6')) trace7=$(warning $0('$1','$2','$3','$4','$5','$6','$7')) trace8=$(warning $0('$1','$2','$3','$4','$5','$6','$7','$8')) trace9=$(warning $0('$1','$2','$3','$4','$5','$6','$7','$8','$9')) endif # for use inside $(eval) IFDEF = ifdef IFEQ = ifeq IFNEQ = ifneq ELSE = else ENDIF = endif # returns 'true' if $1==$2 Eq = $(if $(1)$(2),$(if $(findstring $(1),$(2)),$(if 
$(findstring $(2),$(1)),true)),true) Not = $(if $(1),,true) Neq = $(call Not,$(call Eq,$(1),$(2))) # replace [-./] with '_' CleanName = $(subst /,_,$(subst -,_,$(subst .,_,$(1)))) # return last word from word list LastWord = $(if $(1),$(word $(words $(1)),$(1))) Empty = Space = $(Empty) $(Empty) # twice to unconfuse syntax hiliters SQuote = ' SQuote = ' define NewLine endef # quote str for shell ShellQuote = '$(subst $(SQuote),'\$(SQuote)',$(1))' # replace extensions # 1-src ext list # 2-target ext # 3-source list ReplaceExts = $(foreach ext,$(1),$(patsubst %$(ext),%$(2),$(filter %$(ext),$(3)))) # objs with objdir from source list (1-cleantgt, 2-src list) SourceObjs = $(trace1)$(call SourceObjsExt,$(1),$(OBJEXT),$(2)) # objs with objdir from source list # 1-cleantgt, 2-objext, 3-srcs list SourceObjsExt = $(addprefix $(call JoinPath,$(OBJDIR),$(1))/, $(call ReplaceExts,$(AM_SRCEXTS),$(2),$(3))) # dependency files from object files, must match OBJDEPS DepFiles = $(wildcard $(addsuffix .d,$(1))) # per-target var override, 1=target, 2=varname # if foo_VAR exists, expand to: # build_foo install_foo clean_foo: AM_VAR = $(foo_VAR) # 1-tgt, 2-var, 3-final TgtVar2 = $(3): AM_$(2) = $$($(1)_$(2))$(NewLine) TgtVar = $(if $($(1)_$(2)),$(call TgtVar2,$(1),$(2),$(3))) # loop TgtVar over AM_TARGET_VARIABLES, 1=target, 2-final VarOverride = $(foreach var,$(AM_TARGET_VARIABLES),$(call TgtVar,$(1),$(var),$(2))) # check if actual target (.h, .exe) is nodist based on primary and flags # 1-prim 2-flags TargetNoDist = $(strip $(if $(filter nodist,$(2)), \ true, \ $(if $(filter dist,$(2)), \ , \ $(filter-out $(AM_DIST_PRIMARIES),$(1))))) # return sources that match language # 1-lang # 2-sources LangFiles = $(filter $(addprefix %,$(AM_LANG_$(1)_SRCEXTS)),$(2)) # return list of langs that match sources. 
# 1-sources LangList = $(strip $(foreach lang,$(AM_LANGUAGES),$(if $(call LangFiles,$(lang),$(1)),$(lang)))) # 1-sources LinkLangList = $(foreach lang,$(call LangList,$(1)),$(if $(AM_LANG_$(lang)_LINK),$(lang))) # pick linker variable based on sources, fallback to C # 1-sources DetectLinkVar = AM_LANG_$(call LastWord,C $(call LinkLangList,$(1)))_LINK # convert 'foo/bar' -> '../..' UpDirStep1 = $(subst /, ,$(1)) UpDirStep2 = $(foreach dir,$(call UpDirStep1,$(1)),../) UpDirStep3 = $(subst / ,/,$(call UpDirStep2,$(1))) UpDirStep4 = $(patsubst %/,%,$(call UpDirStep3,$(1))) UpDir = $(if $(filter-out .,$(1)),$(call UpDirStep4,$(1)),.) # # AntiMake requires that joining clean names must result in clean names. # # Thus: # JoinPath(.,foo) -> foo # JoinPath(foo,/abs) -> /abs # JoinPath(a/b,../c) -> a/c # JoinPath(a,../../b/c) -> ../b/c # # 1-path, 2-last name : foo => . | /foo => / | foo/bar => foo CutLastName = $(if $(filter $(2),$(1)),.,$(if $(filter /$(2),$(1)),/,$(patsubst %/$(2),%,$(1)))) # 1-path component, remove last elem : CutLast = $(call CutLastName,$(1),$(lastword $(subst /, ,$(1)))) # 1/2 : actual place where / is put JoinPathFinal = $(if $(filter /,$(1)),$(1)$(2),$(1)/$(2)) # 1/2 : second starts with ../, remove it and last component of $(1) JoinPath5 = $(call JoinPath,$(call CutLast,$(1)),$(patsubst ../%,%,$(2))) # 1/2: check if first ends with .. JoinPath4 = $(if $(filter .. %/..,$(1)),$(call JoinPathFinal,$(1),$(2)),$(call JoinPath5,$(1),$(2))) # 1/2 : check if second starts with ..; otherwise join JoinPath3 = $(if $(filter ../%,$(2)),$(call JoinPath4,$(1),$(2)),$(call JoinPathFinal,$(1),$(2))) # 1/2 : skips component if '.' 
JoinPath2 = $(if $(filter-out .,$(1)),$(if $(filter-out .,$(2)),$(call JoinPath3,$(1),$(2)),$(1)),$(2)) # 1/2 : check if b is absolute, otherwise fix minor problems JoinPath = $(trace2)$(if $(filter /%,$(2)),$(2),$(call JoinPath2,$(if $(filter /,$(1)),$(1),$(patsubst %/,%,$(1))),$(patsubst ./%,%,$(2)))) ## ## Parse target list variables ## ## pick out components from name, call function # 1-varname, 2-words, 3-func, 4-func arg # func args: 1-var, 2-prim, 3-dest, 4-flags, 5-arg ParseName = $(call $(3),$(1),$(filter $(AM_PRIMARIES),$(2)),$(filter $(AM_DESTINATIONS),$(2)),$(filter $(AM_FLAGS),$(2)),$(4)) ForEachList = $(foreach var,$(2),$(call ParseName,$(var),$(subst _, ,$(var)),$(1),$(3))) ## try reconstruct name, if fails, its a random variable # 1-var, 2-prim,3-dest,4-flags CheckName = $(if $(call Eq,$(subst _, ,$(1)),$(strip $(4) $(call LastWord,$(3)) $(call LastWord,$(2)))),$(1)) ## also check if variable is filled # 1-var, 2-prim,3-dest,4-flags CheckNameFull = $(if $(call CheckName,$(1),$(2),$(3),$(4)),$(if $($(1)),$(1))) ## ## Loop over targets in list variables ## ## call function on parsed target # 1-var, 2-prim, 3-dest, 4-flags, 5-func # func args: 1-cleantgt, 2-tgt, 3-prim, 4-dest, 5-flags ForEachTarget2 = $(foreach tgt,$($(1)),$(call $(5),$(call CleanName,$(tgt)),$(tgt),$(2),$(3),$(4))) ## ForEachTarget: call function on all targets in lists # 1-func, 2- var list # func args: 1-cleantgt, 2-tgt, 3-prim, 4-dest, 5-flags ForEachTarget = $(call ForEachList,ForEachTarget2,$(2),$(1)) ## EMBED_SUBDIRS relocations ## add subdir to files # 1-subdir, 2-file list RelocFiles = $(foreach f,$(2),$(if $(filter -%,$(f)),$(f),$(call JoinPath,$(1),$(f)))) # 1-dir, 2-pfx, 3-full RelocOneFlag2 = $(2)$(call JoinPath,$(1),$(patsubst $(2)%,%,$(3))) # 1-dir, 2-flag RelocOneFlag = $(if $(filter -L%,$(2)), \ $(call RelocOneFlag2,$(1),-L,$(2)), \ $(if $(filter -I%,$(2)), \ $(call RelocOneFlag2,$(1),-I,$(2)), \ $(2))) ## Relocate relative files, relative -I/-L, ignore -* # 1-dir, 2- 
flaglist RelocFlags = $(strip $(if $(filter-out .,$(1)), \ $(foreach flg,$(2),$(call RelocOneFlag,$(1),$(flg))), \ $(2))) ## Separate build dir relocation ## non-local source dir: -Isrc/include -> -Isrc/include -I$(srcdir)/src/include # 1-srcdir, 2-flag list FixIncludes = $(strip $(if $(filter-out .,$(1)), \ $(foreach flg,$(2),$(call FixIncludes2,$(1),$(flg))), \ $(2))) # 1-dir, 2-flg FixIncludes2 = $(if $(filter -I%,$(2)), \ $(call FixIncludes3,$(1),$(patsubst -I%,%,$(2))), \ $(2)) # 1-dir, 2-orig dir FixIncludes3 = -I$(2) -I$(call JoinPath,$(srcdir),$(2)) ## ## Makefile fragments ## ### fill values # abs_top_srcdir, abs_top_builddir # nosub_top_builddir, nosub_top_srcdir # 1 - subdir define SetDirs abs_builddir := $$(call JoinPath,$$(abs_top_builddir),$(1)) abs_srcdir := $$(call JoinPath,$$(abs_top_srcdir),$(1)) top_builddir := $$(call UpDir,$(1)) top_srcdir := $$(call JoinPath,$$(top_builddir),$$(nosub_top_srcdir)) builddir := . $(IFEQ) ($$(nosub_top_srcdir),$$(nosub_top_builddir)) srcdir := . 
$(ELSE) srcdir := $$(call JoinPath,$$(top_srcdir),$(1)) $(ENDIF) endef ## ## Embedded subdirs ## # func args: 1-cleantgt, 2-tgt, 3-prim, 4-dest, 5-flags define RelocBigTarget $(trace5) # move vars: $(foreach var,$(AM_TARGET_VARIABLES),$(NewLine)$$(am_PFX)_$(1)_$(var) := $$($(1)_$(var))) # move and relocate EXTRA_$$(am_PFX)_$(1)_SOURCES := $$(call RelocFiles,$$(am_DIR),$$(EXTRA_$(1)_SOURCES)) $$(am_PFX)_$(1)_SOURCES := $$(call RelocFiles,$$(am_DIR),$$($(1)_SOURCES)) $$(am_PFX)_$(1)_DEPENDENCIES := $$(call RelocFiles,$$(am_DIR),$$($(1)_DEPENDENCIES)) $$(am_PFX)_$(1)_LDADD := $$(call RelocFiles,$$(am_DIR),$$($(1)_LDADD)) $$(am_PFX)_$(1)_LIBADD := $$(call RelocFiles,$$(am_DIR),$$($(1)_LIBADD)) $$(am_PFX)_$(1)_CFLAGS := $$(call RelocFlags,$$(am_DIR),$$($(1)_CFLAGS)) $$(am_PFX)_$(1)_CPPFLAGS := $$(call RelocFlags,$$(am_DIR),$$($(1)_CPPFLAGS)) $$(am_PFX)_$(1)_LDFLAGS := $$(call RelocFlags,$$(am_DIR),$$($(1)_LDFLAGS)) # clean vars $(1)_SOURCES = $(1)_LDADD = $(1)_LIBADD = $(foreach var,$(AM_TARGET_VARIABLES),$(NewLine)$(1)_$(var) = ) endef ## pick actual func # func args: 1-cleantgt, 2-tgt, 3-prim, 4-dest, 5-flags define RelocTarget $(trace5) $(if $(filter $(AM_BIG_PRIMARIES),$(3)),$(call RelocBigTarget,$(1),$(2),$(3),$(4),$(5))) endef ## relocate target list # func args: 1-var, 2-prim, 3-dest, 4-flags, 5-arg define RelocTList $(trace5) # detect top and subdir target conflict - it's easier to detect # and error out than to work around the rare case $(IFNEQ) (,$$(filter $(2),$$(AM_BIG_PRIMARIES))) $(IFEQ) (.,$$(am_DIR)) am_TOP_NAMES += $$(foreach tgt,$$($(1)),$$(call CleanName,$$(tgt))) $(ELSE) $(IFNEQ) (,$$(filter $$(am_TOP_NAMES),$$(foreach tgt,$$($(1)),$$(call CleanName,$$(tgt))))) $$(error $$(NewLine)$$(NewLine)\ *** Target names used in top Makefile cannot be re-used in embedded Makefiles. $$(NewLine)\ *** The target variables (eg. 
_SOURCES) conflict is not handled yet) $(ENDIF) $(ENDIF) $(ENDIF) # move value under real_% $(IFEQ) ($(real_$(1)),) real_$(1) := $(ENDIF) real_$(1) += $$(call RelocFiles,$$(am_DIR),$$($(1))) $(1) = # remember in proper list $(IFEQ) ($(3),EXTRA) am_EXTRA_TARGETLISTS += real_$(1) $(ELSE) am_TARGETLISTS += real_$(1) $(ENDIF) endef ## process included values # 1-dir, 2-pfx, 3-tlist define EmbedProcess $(trace3) $(IFNEQ) ($$(filter $(1),$$(am_EMBED_DONE)),) $$(error Double entry in EMBED_SUBDIRS: $(1)) $(ENDIF) # init local vars am_DIR := $(1) am_LOC := $$(call JoinPath,$$(SUBLOC),$(1)) am_PFX := $(2) am_EMBED_DONE += $(1) # reloc & save vars am_DISTFILES += $$(call RelocFiles,$$(am_DIR),$$(EXTRA_DIST)) am_CLEANFILES += $$(call RelocFiles,$$(am_DIR),$$(CLEANFILES)) am_DISTCLEANFILES += $$(call RelocFiles,$$(am_DIR),$$(DISTCLEANFILES)) am_MAINTAINERCLEANFILES += $$(call RelocFiles,$$(am_DIR),$$(MAINTAINERCLEANFILES)) am_EMBED_TODO += $$(call RelocFiles,$$(am_DIR),$$(EMBED_SUBDIRS)) am_SUBDIRS += $$(call RelocFiles,$$(am_DIR),$$(SUBDIRS)) am_DIST_SUBDIRS += $$(call RelocFiles,$$(am_DIR),$$(DIST_SUBDIRS)) # clean vars for new dir EXTRA_DIST = CLEANFILES = DISTCLEANFILES = MAINTAINERCLEANFILES = EMBED_SUBDIRS = SUBDIRS = DIST_SUBDIRS = $(call SetDirs,$(call JoinPath,$(SUBLOC),$(1))) $(call ForEachTarget,RelocTarget,$(3)) $(call ForEachList,RelocTList,$(3)) endef ## read Makefile.am, process it # 1 - dir DoEmbed = $(trace1)$(strip \ $(if $(wildcard $(am_srcdir)/$(1)/Makefile.am), \ $(eval include $(am_srcdir)/$(1)/Makefile.am $(NewLine)) \ $(eval $(call EmbedProcess,$(1),$(call CleanName,$(1)),$(AM_NONEXTRA_TLISTS) $(AM_EXTRA_TLISTS))), \ $(error $(SUBLOC)/Makefile failure: $(call JoinPath,$(SUBLOC),$(1)/Makefile.am) not found.))) ## ## Fragments that build targets ## # Note that variable initialization order is important here # as some of them will be used immediately. 
## ## Install target object ## # 1=cleantgt,2=rawtgt,3=prim,4=dest,5=flags define InstallTarget $(trace5) $(1)_DEST := $$(if $$($(4)dir),$$($(4)dir),$$(error '$(4)dir' empty))$(if $(filter nobase,$(5)),/$(dir $(2))) $(1)_InstFunc := $$(if $$(AM_$(3)_InstFunc),$$(AM_$(3)_InstFunc),DataInstall) # actual installation .PHONY: install_$(1) install: install_$(1) install_$(1): $(2) $$(call $$($(1)_InstFunc),$$(DESTDIR)$$($(1)_DEST)) # hack to pass -rpath to LTLIBRARIES on build time (1) $(2): AM_DEST = $$($(1)_DEST) endef # hack to pass -rpath to LTLIBRARIES on build time (2) %.la: AM_LT_RPATH = $(if $(AM_DEST),-rpath $(AM_DEST)) ## ## Rules for big target ## # 1-varname, 2-ifset, 3-ifnotset IfSet = $(if $(filter-out undefined,$(flavor $(1))),$(2),$(3)) # 1-clean, 2-raw, 3-prim PROGRAMS_Final = $(if $($(1)_EXT),$(2)$($(1)_EXT),$(2)$(EXEEXT)) # 1-clean, 2-raw, 3-prim LIBRARIES_Final = $(if $($(1)_EXT),$(2)$($(1)_EXT),$(patsubst %.a,%$(LIBEXT),$(2))) # calculate target file name # 1-clean, 2-raw, 3-prim FinalTargetFile = $(call IfSet,$(3)_Final,$(call $(3)_Final,$(1),$(2),$(3)),$(2)$($(1)_EXT)) # 1-objs FixObjs = $(patsubst %.a,%$(LIBEXT),$(1)) # 1=cleantgt,2=rawtgt,3=prim,4=dest,5=flags define BigTargetBuild $(trace5) AM_ALL_TARGETS += $(1) $(1)_ALLSRCS := $$($(1)_SOURCES) $$(EXTRA_$(1)_SOURCES) $$(nodist_$(1)_SOURCES) $$(nodist_EXTRA_$(1)_SOURCES) # calculate OBJS from SOURCES $(1)_OBJEXT := $$(if $$(AM_$(3)_OBJEXT),$$(AM_$(3)_OBJEXT),$$(OBJEXT)) $(1)_OBJS := $$(call SourceObjsExt,$(1),$$($(1)_OBJEXT), \ $$($(1)_SOURCES) $$(nodist_$(1)_SOURCES)) $(1)_OBJS_CLEAN := $$($(1)_OBJS) # include additional objects, move flags to _LIBS $(IFEQ) ($(3),PROGRAMS) $(1)_OBJS += $$(filter-out -%,$$($(1)_LDADD)) $(1)_LIBS += $$(filter -%,$$($(1)_LDADD)) $(ELSE) $(1)_OBJS += $$(filter-out -%,$$($(1)_LIBADD)) $(1)_LIBS += $$(filter -%,$$($(1)_LIBADD)) $(ENDIF) # autodetect linker, unless given $(IFEQ) ($($(1)_LINK),) $(1)_LINKVAR := $$(call DetectLinkVar,$$($(1)_ALLSRCS)) $(ELSE) 
$(1)_LINKVAR := $(1)_LINK $(ENDIF) # calculate target file name $(1)_FINAL = $(call FinalTargetFile,$(1),$(2),$(3)) # hook libtool into LTLIBRARIES cleanup $(IFEQ) ($(3),LTLIBRARIES) $(1)_RM = $$(LTCLEAN) $$(RM) $(ELSE) $(1)_RM = $$(RM) $(ENDIF) # fix includes in case of separate build dir $(1)_CPPFLAGS := $$(call FixIncludes,$$(srcdir),$$($(1)_CPPFLAGS)) $(1)_CFLAGS := $$(call FixIncludes,$$(srcdir),$$($(1)_CFLAGS)) # load dependencies -include .dummy. $$(call DepFiles, $$($(1)_OBJS)) # actual build, clean & install targets .PHONY: build_$(1) clean_$(1) # allow target-specific variables $$(eval $$(call VarOverride,$(1),$(call FinalTargetFile,$(1),$(2),$(3)))) # build and clean by default, unless flagged EXTRA $(IFNEQ) ($(4),EXTRA) all: build_$(1) $(ENDIF) clean: clean_$(1) # _DEPENDENCIES and nodist_SOURCES must exist before build starts. $$(call FixObjs,$$($(1)_OBJS)): $$($(1)_DEPENDENCIES) $$(nodist_$(1)_SOURCES) build_$(1): $$($(1)_FINAL) $$($(1)_FINAL): $$(call FixObjs,$$($(1)_OBJS)) @$$(call MkDir,$$(dir $$@)) $$($(if $(filter LIBRARIES,$(3)),ar_lib,$$($(1)_LINKVAR))) clean_$(1): $$(E) "CLEAN" "$$($(1)_FINAL)" $$(Q) $$($(1)_RM) -- $$($(1)_OBJS_CLEAN) $(if $(call TargetNoDist,$(3),$(5)),$$($(1)_FINAL)) DISTCLEANFILES += $$(nodist_$(1)_SOURCES) $$(nodist_EXTRA_$(1)_SOURCES) $(foreach lang,$(AM_LANGUAGES),$(call LangSetup,$(1),$(lang))) endef # 1=cleantgt,2=rawtgt,3=prim,4=dest,5=flags define BigTargetDist am_DISTFILES += $$(filter-out $$(nodist_EXTRA_$(1)_SOURCES) $$(nodist_$(1)_SOURCES),$$($(1)_SOURCES) \ $$(EXTRA_$(1)_SOURCES)) $(if $(call TargetNoDist,$(3),$(5)),,$$($(1)_FINAL)) endef # 1=cleantgt,2=rawtgt,3=prim,4=dest,5=flags define MakeBigTarget $(trace5) # build if first time $(IFEQ) ($(filter $(1),$(AM_ALL_TARGETS)),) $(call BigTargetBuild,$(1),$(2),$(3),$(4),$(5)) $(call BigTargetDist,$(1),$(2),$(3),$(4),$(5)) $(ELSE) # allow only EXTRA be double $(IFNEQ) ($(4),EXTRA) $$(error Target '$2' described listed several times) $(ENDIF) $(ENDIF) # call 
InstallTarget, for dest != (EXTRA, noinst) $(IFEQ) ($(filter EXTRA noinst,$(4)),) $(call InstallTarget,$(1),$$($(1)_FINAL),$(3),$(4),$(5)) $(ENDIF) endef ## ## Rules for small target ## # 1=cleantgt,2=rawtgt,3=prim,4=dest,5=flags define MakeSmallTarget $(trace5) AM_ALL_TARGETS += $(1) # should the target file be distributed or cleaned? $(IFEQ) ($(call TargetNoDist,$(3),$(5)),) am_DISTFILES += $(2) $(ELSE) CLEANFILES += $(2) $(ENDIF) # build if not EXTRA $(IFNEQ) ($(4),EXTRA) all: $(2) # install if not EXTRA or noinst $(IFNEQ) ($(4),noinst) $(call InstallTarget,$(1),$(2),$(3),$(4),$(5)) $(ENDIF) $(ENDIF) endef ## ## Fill GNU-style vars for subdir ## # preferred to top_srcdir/top_builddir topdir = $(top_builddir) ifneq ($(nosub_top_builddir),.) $(error Non-local builddir not supported) endif # initial locaton vars $(eval $(call SetDirs,$(SUBLOC))) ifneq ($(nosub_top_srcdir),$(nosub_top_builddir)) # use VPATH to find non-local sources VPATH += $(srcdir) # fix includes AM_CPPFLAGS := $(call FixIncludes,$(srcdir),$(AM_CPPFLAGS)) AM_CFLAGS := $(call FixIncludes,$(srcdir),$(AM_CFLAGS)) endif ## ## O= ## if given, create wrapper makefiles in target dir ## that include makefiles from source dir, then run ## make from target dir. ## ifneq ($(O),) # 1-makefile define WrapMakeFileCmd @$(call MkDir,$(dir $(O)/$(1))) @$(call Printf,'%s\n%s\n%s\n%s\n%s\n', \ 'abs_top_srcdir = $(CURDIR)' \ 'abs_top_builddir = $(call JoinPath,$(CURDIR),$(O))' \ 'nosub_top_srcdir = $(call UpDir,$(O))' \ 'nosub_top_builddir = .' 
\ 'include $(abs_top_srcdir)/$(1)') \ > $(O)/$(1) endef # 1-makefile WrapMakeFile = $(if $(wildcard $(O)/$(1)),,$(call WrapMakeFileCmd,$(1))$(NewLine)) # redirect whatever rule was given .PHONY: all $(MAKECMDGOALS) all $(filter-out all,$(MAKECMDGOALS)): $(if $(wildcard $(O)),,$(error O=$(O): Directory '$(O)' does not exist)) $(foreach mk,$(filter-out /%,$(MAKEFILE_LIST)),$(call WrapMakeFile,$(mk))) $(Q) $(MAKE) O= -C $(O) $(MAKECMDGOALS) # O=empty, this is main makefile else ## ## main targets, tie them with subdir and local targets ## # disable random rules .SUFFIXES: all: sub-all all-local clean: sub-clean clean-local install: sub-install install-local distclean: sub-distclean distclean-local maintainer-clean: sub-maintainer-clean maintainer-clean-local .PHONY: all clean install dist distclean maintainer-clean # -local are empty targets by default .PHONY: all-local clean-local install-local distclean-local maintainer-clean-local all-local clean-local install-local distclean-local maintainer-clean-local: ## ## Actual embedding starts ## AM_ALL_TLISTS2 = $(filter $(addprefix %,$(AM_PRIMARIES)),$(.VARIABLES)) AM_ALL_TLISTS = $(call ForEachList,CheckName,$(AM_ALL_TLISTS2)) AM_NONEXTRA_TLISTS = $(filter-out EXTRA_%,$(AM_ALL_TLISTS)) AM_EXTRA_TLISTS = $(filter EXTRA_%,$(AM_ALL_TLISTS)) am_srcdir := $(srcdir) am_DIR := . 
am_PFX := am_TARGETLISTS := am_EXTRA_TARGETLISTS := am_TOP_NAMES := # move top-level targets away $(eval $(call ForEachList,RelocTList,$(AM_NONEXTRA_TLISTS))) $(eval $(call ForEachList,RelocTList,$(AM_EXTRA_TLISTS))) am_SUBDIRS := $(SUBDIRS) am_DIST_SUBDIRS := $(DIST_SUBDIRS) am_DISTFILES := $(EXTRA_DIST) am_CLEANFILES := $(CLEANFILES) am_DISTCLEANFILES := $(DISTCLEANFILES) am_MAINTAINERCLEANFILES := $(MAINTAINERCLEANFILES) am_EMBED_NOW := $(EMBED_SUBDIRS) am_EMBED_DONE := am_EMBED_TODO := EXTRA_DIST = CLEANFILES = DISTCLEANFILES = MAINTAINERCLEANFILES = SUBDIRS = DIST_SUBDIRS = EMBED_SUBDIRS = $(foreach dir,$(am_EMBED_NOW),$(call DoEmbed,$(dir))) am_EMBED_NOW := $(am_EMBED_TODO) am_EMBED_TODO := $(foreach dir,$(am_EMBED_NOW),$(call DoEmbed,$(dir))) am_EMBED_NOW := $(am_EMBED_TODO) am_EMBED_TODO := $(foreach dir,$(am_EMBED_NOW),$(call DoEmbed,$(dir))) am_EMBED_NOW := $(am_EMBED_TODO) am_EMBED_TODO := $(if $(am_EMBED_NOW),$(error EMBED_SUBDIRS recursion limit reached...)) # embedding done, move variables back $(eval $(call SetDirs,$(SUBLOC))) CLEANFILES := $(am_CLEANFILES) DISTCLEANFILES := $(am_DISTCLEANFILES) MAINTAINERCLEANFILES := $(am_MAINTAINERCLEANFILES) SUBDIRS := $(am_SUBDIRS) DIST_SUBDIRS := $(am_DIST_SUBDIRS) EMBED_SUBDIRS := $(am_EMBED_DONE) am_CLEANFILES = am_DISTCLEANFILES = am_MAINTAINERCLEANFILES = am_DIST_SUBDIRS = am_SUBDIRS = am_EMBED_DONE = am_TARGETLISTS := $(sort $(am_TARGETLISTS)) am_EXTRA_TARGETLISTS := $(sort $(am_EXTRA_TARGETLISTS)) # avoid duplicate entries with am_TARGETLISTS am_EXTRA_TARGETLISTS := $(filter-out $(am_TARGETLISTS),$(am_EXTRA_TARGETLISTS)) # allow seeing moved lists AM_FLAGS += real ## EMBED_SUBDIRS end ## ## Launch target hooks ## amdir = $(dir $(realpath $(filter %/antimake.mk antimake.mk,$(MAKEFILE_LIST)))) # 1-feat name FeatFile = $(call JoinPath,$(amdir),amext-$(1).mk) # 1- fname LoadFeature = $(if $(wildcard $(call FeatFile,$(1))),$(eval include $(call FeatFile,$(1))),$(error Feature "$(call FeatFile,$(1))" is not 
available.)) $(foreach f,$(AM_FEATURES),$(call LoadFeature,$(f))) $(eval $(foreach hook,$(AM_TARGET_HOOKS),$(call ForEachTarget,$(hook),$(am_TARGETLISTS)))) $(eval $(foreach hook,$(AM_TARGET_HOOKS),$(call ForEachTarget,$(hook),$(am_EXTRA_TARGETLISTS)))) ## ## Now generate the rules ## ## check which target func to call # 1=cleantgt,2=rawtgt,3=prim,4=dest,5=flags MakeTarget = $(call $(if $(filter $(AM_BIG_PRIMARIES),$(3)),MakeBigTarget,MakeSmallTarget),$(1),$(2),$(3),$(4),$(5)) ## process all targets in one list # 1-list, 2-prim,3-dest,4-flags MakeTargetList = $(foreach tgt,$($(1)),$(call MakeTarget,$(call CleanName,$(tgt)),$(tgt),$(2),$(3),$(4))) ## process all target lists # 1=list names ProcessTargets = $(call ForEachTarget,MakeTarget,$(1)) # process non-EXTRA targets $(eval $(call ProcessTargets,$(am_TARGETLISTS))) # process EXTRA_* last, they may already have been processed $(eval $(call ProcessTargets,$(am_EXTRA_TARGETLISTS))) ## ## clean targets ## clean: ifdef CLEANFILES $(E) "CLEAN" $@ $(Q) $(RM) -- $(CLEANFILES) endif distclean: clean $(E) "DISTCLEAN" $@ $(Q) $(RM) -r -- $(OBJDIR) ifdef DISTCLEANFILES $(Q) $(RM) -- $(DISTCLEANFILES) endif maintainer-clean: clean $(E) "MAINTAINERCLEAN" $@ $(Q) $(RM) -r -- $(OBJDIR) ifdef DISTCLEANFILES $(Q) $(RM) -- $(DISTCLEANFILES) endif ifdef MAINTAINERCLEANFILES $(Q) $(RM) -- $(MAINTAINERCLEANFILES) endif ## ## actual subdir targets ## # 1-dir define MakeSubDir $(trace1) $(E) "MKDIR" "Create $(call JoinPath,$(SUBLOC),$(1))" $(Q) $(call MkDir,$(1)) $(Q) $(call Printf,"include $(call UpDir,$(1))/$(srcdir)/$(1)/Makefile\n") \ > $(1)/Makefile endef # 1-dir, 2-tgt define SubTarget $(trace2) $(if $(wildcard $(1)/Makefile),,$(call MakeSubDir,$(1))) $(E) "-->" "$(call JoinPath,$(SUBLOC),$(1))" $(Q) $(MAKE) -C $(1) $(2) $(E) "<--" "$(call JoinPath,$(SUBLOC),$(1))" endef sub-all sub-install sub-clean: $(foreach dir,$(SUBDIRS),$(call SubTarget,$(dir),$(subst sub-,,$@))$(NewLine)) # Avoid double dirs in DIST_SUBDIRS, without 
changing order am_DISTDIRS = $(SUBDIRS) $(foreach dir,$(DIST_SUBDIRS),$(if $(filter $(dir),$(SUBDIRS)),,$(dir))) sub-dist sub-distclean sub-maintainer-clean: $(foreach dir,$(am_DISTDIRS),$(call SubTarget,$(dir),$(subst sub-,,$@))$(NewLine)) .PHONY: sub-all sub-clean sub-install sub-dist sub-distclean sub-maintainer-clean ## ## actual dist targets ## DistTarget = $(foreach fmt,$(1),dist-$(fmt)) AM_DIST_ALL ?= gzip bzip2 xz zip AM_DIST_ALL_TGTS = $(call DistTarget,$(AM_DIST_ALL)) AM_DIST_DEF_TGTS = $(call DistTarget,$(AM_DIST_DEFAULT)) AM_FORMAT_gzip_EXT = tar.gz AM_FORMAT_gzip_CMD = tar chof - $(AM_DIST_BASE) | gzip > $(AM_DIST_BASE).$(AM_FORMAT_gzip_EXT) AM_FORMAT_bzip2_EXT = tar.bz2 AM_FORMAT_bzip2_CMD = tar chof - $(AM_DIST_BASE) | bzip2 > $(AM_DIST_BASE).$(AM_FORMAT_bzip2_EXT) AM_FORMAT_xz_EXT = tar.xz AM_FORMAT_xz_CMD = tar chof - $(AM_DIST_BASE) | xz > $(AM_DIST_BASE).$(AM_FORMAT_xz_EXT) AM_FORMAT_zip_EXT = zip AM_FORMAT_zip_CMD = zip -rq $(AM_DIST_BASE).$(AM_FORMAT_zip_EXT) $(AM_DIST_BASE) # 1-name define MakeDist $(E) "CHECK" $@ $(Q) $(MAKE) -s am-check-distfiles $(E) "MKDIR" $(AM_DIST_BASE) $(Q) $(RM) -r -- $(AM_DIST_BASE) $(AM_DIST_BASE).$(AM_DIST_$(1)_EXT) $(Q) $(call MkDir,$(AM_DIST_BASE)) $(E) "COPY" $(AM_DIST_BASE) $(Q) $(MAKE) -s am-show-distfiles | cpio -pmdL --quiet $(AM_DIST_BASE) $(E) "PACK" $(AM_DIST_BASE).$(AM_FORMAT_$(1)_EXT) $(Q) $(AM_FORMAT_$(1)_CMD) $(Q) $(RM) -r -- $(AM_DIST_BASE) endef .PHONY: dist $(AM_DIST_ALL_TGTS) dist: $(AM_DIST_DEF_TGTS) dist-all: $(AM_DIST_ALL_TGTS) $(AM_DIST_ALL_TGTS): $(call MakeDist,$(subst dist-,,$@)) # show list of files that need to be in final archive .PHONY: am-show-distfiles am-show-distfiles: $(foreach dir,$(am_DISTDIRS),@$(MAKE) $(AM_MAKEFLAGS) --no-print-directory -C $(dir) $@ $(NewLine)) $(foreach file,$(am_FINAL_DISTFILES),@$(call Printf,"$(call JoinPath,$(SUBLOC),$(file))\n") $(NewLine)) # do dependencies as separate step, in case building outputs anything .PHONY: am-check-distfiles 
am-check-distfiles: $(am_FINAL_DISTFILES) $(foreach dir,$(am_DISTDIRS),@$(MAKE) $(AM_MAKEFLAGS) -C $(dir) $@ $(NewLine)) ## ## debug target ## # 1=var define AmDebugShow $(if $($(1)),@$(call Printf,"$(1) = $($(1))\n")) $(NewLine) endef # 1=cleantgt,2=rawtgt,3=prim,4=dest,5=flags define AmDebugTarget $(trace5) $(foreach var,$(AM_DEBUG_TARGET_VARS),$(call AmDebugShow,$(1)_$(var))) @$(call Printf,"\n") endef # func args: 1-var, 2-prim, 3-dest, 4-flags CollectDests = $(filter-out noinst EXTRA,$(3)) AM_USED_DESTS = $(sort $(call ForEachList,CollectDests,$(am_TARGETLISTS))) AM_DEBUG_VARS = GNUMAKE380 GNUMAKE381 GNUMAKE382 MAKEFILE_LIST \ AM_LANGUAGES AM_FLAGS AM_DESTINATIONS \ AM_ALL_TARGETS EXEEXT am_FINAL_DISTFILES \ nosub_top_builddir nosub_top_srcdir \ abs_top_srcdir abs_top_builddir \ srcdir builddir top_srcdir top_builddir \ SUBDIRS EMBED_SUBDIRS DIST_SUBDIRS \ DISTFILES CLEANFILES DISTCLEANFILES MAINTAINERCLEANFILES AM_DEBUG_TARGET_VARS = SOURCES OBJS LINKVAR DEST USUAL_OBJS USUAL_SRCS EXT FINAL \ $(AM_TARGET_VARIABLES) AM_DEBUG_LANG_VARS = SRCEXTS am-debug: @$(call Printf,"\n==== Global Variables ====\n") $(foreach var,$(AM_DEBUG_VARS),$(call AmDebugShow,$(var))) @$(call Printf,"\n==== Per-language Variables ====\n") $(foreach lg,$(AM_LANGUAGES),$(foreach var,$(AM_DEBUG_LANG_VARS),$(call AmDebugShow,AM_LANG_$(lg)_$(var)))) @$(call Printf,"\n==== Per-target Variables ====\n") $(call ForEachTarget,AmDebugTarget,$(am_TARGETLISTS) $(am_EXTRA_TARGETLISTS)) @$(call Printf,"\n==== Active install directories ====\n") $(foreach dst,$(AM_USED_DESTS),@$(call Printf," $(dst)dir = $($(dst)dir)\n" $(NewLine))) ## ## regtests for basic tools ## AM_TESTS = 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 AM_TEST_1 = $(call Eq,a b c,a b c),$(call Eq,,),$(call Eq,a,aa),$(call Eq,a,a a) AM_TEST_1_RES = true,true,, AM_TEST_2 = $(call Neq,a,aa),$(call Neq,a,a) AM_TEST_2_RES = true, AM_TEST_3 = $(call CleanName,obj/foo-baz.x) AM_TEST_3_RES = obj_foo_baz_x AM_TEST_4 = $(call LastWord,a),$(call 
LastWord,a b c),$(call LastWord,) AM_TEST_4_RES = a,c, AM_TEST_5 = $(call ReplaceExts,.c .cpp X.foo,.o,s1.c s2.cpp s3X.foo s4.h) AM_TEST_5_RES = s1.o s2.o s3.o AM_TEST_5 = $(call LangList,foo.c c.foo),$(call LangList,foo.c c.foo f.cpp) AM_TEST_5_RES = C,C CXX AM_TEST_6 = $(call DetectLinkVar,foo.c c.foo),$(call DetectLinkVar,foo.c c.foo x.cpp),$(call DetectLinkVar,foo),$(call DetectLinkVar,) AM_TEST_6_RES = AM_LANG_C_LINK,AM_LANG_CXX_LINK,AM_LANG_C_LINK,AM_LANG_C_LINK AM_TEST_7 = $(call UpDir,foo)|$(call UpDir,)|$(call UpDir,.)|$(call UpDir,foo/bar)|$(call UpDir,a/b/c)| AM_TEST_7_RES = ..|.|.|../..|../../..| AM_TEST_8 = $(call JoinPath,.,.)|$(call JoinPath,,)|$(call JoinPath,a,.)|$(call JoinPath,.,b)|$(call JoinPath,a,b)|$(call JoinPath,a/b,../c)|$(call JoinPath,a/b,../../../c) AM_TEST_8_RES = .||a|b|a/b|a/c|../c define AM_TEST_9_EVAL $(IFEQ) ($$(AM_TEST_9_RES),OK) AM_TEST_9 = OK $(ELSE) AM_TEST_9 = fail $(ENDIF) endef AM_TEST_9_RES = OK $(eval $(AM_TEST_9_EVAL)) AM_TEST_10 = $(call CheckName,nobase_bin_PROGRAMS,PROGRAMS,bin,nobase)|$(call CheckName,a,a,,)|$(call CheckName,bin_bin_DATA,,bin bin,DATA) AM_TEST_10_RES = nobase_bin_PROGRAMS|a| AM_TEST_11_Show = $(4)-$(3)-$(2) AM_TEST_11 = $(call ForEachList,AM_TEST_11_Show,bin_PROGRAMS foo_DATA baz_foo base_nobase_dist_nodist_DATA_PROGRAMS) AM_TEST_11_RES = -bin-PROGRAMS --DATA -- base nobase dist nodist--DATA PROGRAMS AM_TEST_12 = $(call RelocFlags,sub/dir,-I. 
-I./foo -Lfoo/bar -I/inc -L/lib -lfoo) AM_TEST_12_RES = -Isub/dir -Isub/dir/foo -Lsub/dir/foo/bar -I/inc -L/lib -lfoo AM_TEST_13 = $(call TargetNoDist,HEADERS,)|$(call TargetNoDist,HEADERS,nodist)|$(call TargetNoDist,PROGRAMS,)|$(call TargetNoDist,PROGRAMS,dist) AM_TEST_13_RES = |true|PROGRAMS| AM_TEST_14 = $(call ShellQuote,foo'bar\')|$(call ShellQuote,as!d' \\ $$foo) AM_TEST_14_RES = 'foo'\''bar\'\'''|'as!d'\'' \\ $$foo' AM_TEST_15 = $(call JoinPath,sub/dir,../foo) , \ $(call JoinPath,sub/dir,../../foo) , \ $(call JoinPath,sub/dir,../../../foo) , \ $(call JoinPath,sub/dir/,../foo) , \ $(call JoinPath,/,./foo) , \ $(call JoinPath,..,../foo) , \ $(call JoinPath,/foo,../baz) , \ $(call JoinPath,/foo,../../baz) , \ $(call JoinPath,foo/..,./foo) AM_TEST_15_RES = sub/foo , foo , ../foo , sub/foo , /foo , ../../foo , /baz , /baz , foo/../foo AM_TEST_16_EXT = .foo AM_TEST_16 = $(call FinalTargetFile,prog,prog,PROGRAMS) | $(call FinalTargetFile,AM_TEST_16,AM_TEST_16,PROGRAMS) AM_TEST_16_RES = prog$(EXEEXT) | AM_TEST_16.foo AmTest = $(if $(call Eq,$($(1)),$($(2))),@$(call Printf,"$(1): OK\n"),@$(call Printf,"$(subst ",',$(1): FAIL: $($(1)) != $($(2))\n)"))$(NewLine) am-test: $(Q) test "$(call Eq,a b c,a b c),$(call Eq,,),$(call Eq,a,aa),$(call Eq,a,a a)" = "true,true,," $(foreach nr,$(AM_TESTS),$(call AmTest,AM_TEST_$(nr),AM_TEST_$(nr)_RES)) ## ## help target ## AmHelpNames = targets standalone internal config dests .PHONY: help $(foreach n,$(AmHelpNames),help-$(n) help-$(n)-local) $(foreach n,$(AmHelpNames),help-$(n)-local): help: $(foreach n,$(AmHelpNames),help-$(n) help-$(n)-local) # 1-var, 2-desc AmConf = @$(call Printf," %-27s %s=%s\n" $(call ShellQuote,$(2)) $(call ShellQuote,$(1)) $(call ShellQuote,$($(1)))) help-targets: @$(call Printf,"\n") @$(call Printf,"Main targets:\n") @$(call Printf," all Build all targets (default)\n") @$(call Printf," install Install files\n") @$(call Printf," dist Create source archive\n") @$(call Printf," clean Clean built files\n") 
@$(call Printf," distclean Clean configured files\n") @$(call Printf," maintainer-clean Delete anything that can be generated\n") help-standalone: @$(call Printf,"\n") @$(call Printf,"Standalone targets: (make -f antimake.mk)\n") @$(call Printf," show-location Prints full path to antimake.mk (default)\n") @$(call Printf," show-config Prints template config.mak.in\n") help-internal: @$(call Printf,"\n") @$(call Printf,"Internal targets:\n") @$(call Printf," am-show-distfiles Shows files that go into source archive\n") @$(call Printf," am-debug Shows variables that affect the build\n") @$(call Printf," am-test Regtest for internal functions\n") help-config: @$(call Printf,"\n") @$(call Printf,"Config variables and their current values:\n") $(call AmConf,CC,C compiler) $(call AmConf,CFLAGS,C compiler flags) $(call AmConf,CPPFLAGS,C pre-processor flags) $(call AmConf,LDFLAGS,Linker flags) help-dests: @$(call Printf,"\n") @$(call Printf,"Destinations for install [ prefix=$(prefix) ]:\n") $(foreach dst,$(AM_USED_DESTS),@$(call Printf," $(dst)dir = $($(dst)dir)\n") $(NewLine)) endif # O=empty skytools-3.2.6/lib/mk/amext-modes.mk0000644000000000000000000000446212166266754014255 0ustar # # Custom compilation modes # Compile one target several times with different # configuration variables. 
# # Sample: # CFLAGS = -O2 # bin_PROGRAM = prog # prog_SOURCES = prog.c # # AM_MODES = debug # CFLAGS_debug = -O0 -g # # Result: # prog - compiled with -O2 # prog-debug - compiled with -O0 -g # AM_MODES ?= # Variables that can be overrided with $(var)_$(mode) AM_MODE_OVERRIDE += CC CXX CFLAGS CPPFLAGS DEFS LDFLAGS LIBS ## add "-MODE" string before file extension # 1-mode, 2-filename ModeName = $(basename $(2))-$(1)$(suffix $(2)) ## add mode suffix to all plain filenames # 1-mode, 2-file names, options ModeFilter = $(foreach f,$(2),$(if $(filter /% -%,$(f)),$(f),$(call ModeName,$(1),$(f)))) ## set per-target var # 1-dbgvar, 2-var, 3-final ModeVarX = $(3): $(2) = $$($(1))$(NewLine) # 1-mode, 2-var, 3-final ModeVarOverride = $(if $($(2)_$(1)),$(call ModeVarX,$(2)_$(1),$(2),$(3))) # 1-mode, 2-final ModeVarOverrideAll = $(foreach v,$(AM_MODE_OVERRIDE),$(call ModeVarOverride,$(1),$(v),$(2))) ## copy target, replace vars # 1=cleantgt,2=rawtgt,3=prim,4=dest,5=flags,6=mode,7-newtgt,8-cleantgt,9-list define AddModes4 $(trace8) $(IFEQ) ($$(filter $(9),$$(am_TARGETLISTS)),) am_TARGETLISTS += $(9) $(ENDIF) # add new target to old list $(9) += $(7) # copy details, change library names $(8)_SOURCES := $$($(1)_SOURCES) nodist_$$(8)_SOURCES := $$(nodist_$(1)_SOURCES) $(8)_CPPFLAGS := $$($(1)_CPPFLAGS) $(8)_CFLAGS := $$($(1)_CFLAGS) $(8)_LDFLAGS := $$($(1)_LDFLAGS) $(8)_LIBADD := $$(call ModeFilter,$(6),$$($(1)_LIBADD)) $(8)_LDADD := $$(call ModeFilter,$(6),$$($(1)_LDADD)) # add variable replacements $(call ModeVarOverrideAll,$(6),$(call FinalTargetFile,$(8),$(7),$(3))) endef ## add clean name, list name # 1=cleantgt,2=rawtgt,3=prim,4=dest,5=flags,6-mode,7-raw tgt AddModes3 = $(call AddModes4,$(1),$(2),$(3),$(4),$(5),$(6),$(7),$(call CleanName,$(7)),$(subst $(Space),_,$(5)_$(4)_$(3))) ## loop over modes # 1=cleantgt,2=rawtgt,3=prim,4=dest,5=flags AddModes2 = $(trace5)$(foreach m,$(AM_MODES),$(call AddModes3,$(1),$(2),$(3),$(4),$(5),$(m),$(call ModeName,$(m),$(2)))) ## ignore small 
primaries # 1=cleantgt,2=rawtgt,3=prim,4=dest,5=flags AddModes = $(trace5)$(if $(filter $(3),$(AM_BIG_PRIMARIES)),$(call AddModes2,$(1),$(2),$(3),$(4),$(5))) # Install hook AM_TARGET_HOOKS += AddModes skytools-3.2.6/lib/mk/safe-headers.sed0000644000000000000000000000020012166266754014507 0ustar s/HAVE_/USUAL_&/g s/PACKAGE_/USUAL_&/g s/LT_OBJ/USUAL_&/g s/STRERROR_/USUAL_&/g s/CASSERT/USUAL_&/g s/WORDS_BIGENDIAN/USUAL_&/g skytools-3.2.6/lib/mk/amext-msvc.mk0000644000000000000000000000211112166266754014103 0ustar # # Support for MSVC toolchain. # # Usage: # 1. Install coreutils (printf, tail) and make from gnuwin32. # 2. Make sure VC env variables are loaded (PATH) # SHELL = cmd.exe ShellQuote = "$(subst $$, \$$, $(subst ",\",$(subst \,\\,$(1))))" EXEEXT = .exe LIBEXT = .lib OBJEXT = .obj CC = cl -nologo CFLAGS = -O2 $(WFLAGS) WFLAGS = -W2 -w24013 CPP = $(CC) -E LDFLAGS = LIBS = -lws2_32 -ladvapi32 AR = lib ARFLAGS = -nologo LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -Fe$(call vcFixPath,$@) Printf = printf $(subst %,%%,$(1)) $(2) MKDIR_P = md MkDir = if not exist $(call vcFixPath,$(1)) $(MKDIR_P) $(call vcFixPath,$(1)) vcFixPath = $(subst /,\,$(1)) vcFixLibs = $(patsubst %.a,%.lib,$(patsubst -l%,%.lib,$(1))) vcFixAll = $(call vcFixPath,$(call vcFixLibs,$(1))) define AM_LANG_C_COMPILE $(E) "CC" $< $(Q) $(COMPILE) -c -Fo$(call vcFixPath,$@) $< | tail -n+2 endef define AM_LANG_C_LINK $(E) "CCLD" $@ $(Q) $(LINK) $(call vcFixAll,$^ $(AM_LIBS) $(LIBS)) $(AM_LT_RPATH) endef define ar_lib $(E) "LIB" $@ $(Q) $(AR) $(ARFLAGS) -out:$(call vcFixPath,$@) $^ endef skytools-3.2.6/lib/mk/amext-libusual.mk0000644000000000000000000000315712166266754014766 0ustar # # Merge libusual sources with target sources # # Usage: # USUAL_DIR = # # _EMBED_LIBUSUAL = 1 # # It adds module sources into _SOURCES # and -I$(USUAL_DIR) to _CPPFLAGS. 
# ## ## Utility functions for libusual link ## _USUAL_DIR = $(call JoinPath,$(srcdir),$(USUAL_DIR)) # module names from sources (plus headers) UsualMods = $(trace1)$(shell $(_USUAL_DIR)/find_modules.sh $(_USUAL_DIR) $(wildcard $(addprefix $(srcdir)/,$(1)))) # full-path sources based on module list UsualSrcsFull = $(trace1)$(wildcard $(addprefix $(_USUAL_DIR)/usual/,$(addsuffix *.[ch],$(1)))) # remove USUAL_DIR UsualStrip = $(trace1)$(subst $(_USUAL_DIR)/,,$(1)) # simple-path sources based on module list UsualSrcs = $(call UsualStrip,$(call UsualSrcsFull,$(1))) # usual sources from user source file list UsualSources = $(if $(1),$(call UsualSrcsFull,$(call UsualMods,$(1)))) # 1=cleantgt,2=rawtgt,3=prim,4=dest,5=flags define EmbedLibUsual $(trace5) # embed libusual objects directly $(IFEQ) ($$($(1)_EMBED_LIBUSUAL),1) $(1)_SOURCES := $$($(1)_SOURCES) $$(call UsualSources, $$($(1)_SOURCES)) EXTRA_$(1)_SOURCES := $$(EXTRA_$(1)_SOURCES) \ $$(call UsualSources, \ $$(EXTRA_$(1)_SOURCES) \ $$(nodist_$(1)_SOURCES) \ $$(nodist_EXTRA_$(1)_SOURCES)) $(1)_CPPFLAGS += -I$$(USUAL_DIR) # add libusual to vpath $(IFEQ) ($$(filter $$(USUAL_DIR),$$(VPATH)),) VPATH += $$(USUAL_DIR) $(IFNEQ) ($$(srcdir),$$(builddir),) VPATH += $$(call JoinPath,$$(srcdir),$$(USUAL_DIR)) $(ENDIF) $(ENDIF) $(ENDIF) endef AM_TARGET_HOOKS += EmbedLibUsual EXTRA_DIST += $(_USUAL_DIR)/find_modules.sh $(_USUAL_DIR)/usual/config.h.in skytools-3.2.6/lib/mk/libusual.pc.in0000644000000000000000000000045412166266754014247 0ustar prefix=@prefix@ exec_prefix=@exec_prefix@ libdir=@libdir@ includedir=@includedir@ datarootdir=@datarootdir@ pkgdatadir=@pkgdatadir@ antimake=@pkgdatadir@/antimake.mk Name: libusual Description: Usual utility library for C Version: @PACKAGE_VERSION@ Cflags: -I${includedir} Libs: -L${libdir} -lusual skytools-3.2.6/sql/0000755000000000000000000000000012426435645011111 5ustar skytools-3.2.6/sql/ztestall.sh0000755000000000000000000000031012426435645013304 0ustar #! 
/bin/sh set -e for pg in pg83 pg84 pg90 pg91 pg92 pg93; do for mod in pgq pgq_coop pgq_node pgq_ext londiste; do echo " #### $pg/$mod ####" $pg make -s -C $mod clean test done done skytools-3.2.6/sql/pgq/0000755000000000000000000000000012426435645011700 5ustar skytools-3.2.6/sql/pgq/README.pgq0000644000000000000000000000045512426435645013352 0ustar Schema overview =============== pgq.consumer consumer name <> id mapping pgq.queue queue information pgq.subscription consumer registrations pgq.tick snapshots that group events into batches pgq.retry_queue events to be retried pgq.failed_queue events that have failed pgq.event_* data tables skytools-3.2.6/sql/pgq/Makefile0000644000000000000000000000266312426435645013347 0ustar EXTENSION = pgq EXT_VERSION = 3.2.6 EXT_OLD_VERSIONS = 3.1 3.1.1 3.1.2 3.1.3 3.1.6 3.2 3.2.3 DOCS = README.pgq PGQ_TESTS = pgq_core pgq_perms logutriga sqltriga $(trunc_test) # comment it out if not wanted UPGRADE_TESTS = pgq_init_upgrade $(PGQ_TESTS) clean Contrib_data = structure/uninstall_pgq.sql Contrib_regress = $(UPGRADE_TESTS) pgq_init_noext $(PGQ_TESTS) Extension_regress = $(UPGRADE_TESTS) pgq_init_ext $(PGQ_TESTS) # MAJORVERSION was defined in 8.4 trunc_test = $(if $(MAJORVERSION),trunctrg) include ../common-pgxs.mk SUBDIRS = lowlevel triggers # PGXS does not have subdir support, thus hack to recurse into lowlevel/ all: sub-all install: sub-install clean: sub-clean distclean: sub-distclean sub-all sub-install sub-clean sub-distclean: for dir in $(SUBDIRS); do \ $(MAKE) -C $$dir $(subst sub-,,$@) \ DESTDIR=$(DESTDIR) \ PG_CONFIG=$(PG_CONFIG) \ || exit 1; \ done lowlevel/pgq_lowlevel.sql: sub-all triggers/pgq_triggers.sql: sub-all # # docs # dox: cleandox $(SRCS) mkdir -p docs/html mkdir -p docs/sql $(CATSQL) --ndoc structure/tables.sql > docs/sql/schema.sql $(CATSQL) --ndoc structure/func_public.sql > docs/sql/external.sql $(CATSQL) --ndoc structure/func_internal.sql > docs/sql/internal.sql $(CATSQL) --ndoc structure/triggers.sql > 
docs/sql/triggers.sql $(NDOC) $(NDOCARGS) doxsync: for m in pgq_coop pgq_node pgq_ext londiste; do \ cp docs/Topics.txt docs/Languages.txt ../$$m/docs; \ done skytools-3.2.6/sql/pgq/functions/0000755000000000000000000000000012426435645013710 5ustar skytools-3.2.6/sql/pgq/functions/pgq.event_retry_raw.sql0000644000000000000000000000373512426435645020446 0ustar create or replace function pgq.event_retry_raw( x_queue text, x_consumer text, x_retry_after timestamptz, x_ev_id bigint, x_ev_time timestamptz, x_ev_retry integer, x_ev_type text, x_ev_data text, x_ev_extra1 text, x_ev_extra2 text, x_ev_extra3 text, x_ev_extra4 text) returns bigint as $$ -- ---------------------------------------------------------------------- -- Function: pgq.event_retry_raw(12) -- -- Allows full control over what goes to retry queue. -- -- Parameters: -- x_queue - name of the queue -- x_consumer - name of the consumer -- x_retry_after - when the event should be processed again -- x_ev_id - event id -- x_ev_time - creation time -- x_ev_retry - retry count -- x_ev_type - user data -- x_ev_data - user data -- x_ev_extra1 - user data -- x_ev_extra2 - user data -- x_ev_extra3 - user data -- x_ev_extra4 - user data -- -- Returns: -- Event ID. 
-- ---------------------------------------------------------------------- declare q record; id bigint; begin select sub_id, queue_event_seq, sub_queue into q from pgq.consumer, pgq.queue, pgq.subscription where queue_name = x_queue and co_name = x_consumer and sub_consumer = co_id and sub_queue = queue_id; if not found then raise exception 'consumer not registered'; end if; id := x_ev_id; if id is null then id := nextval(q.queue_event_seq); end if; insert into pgq.retry_queue (ev_retry_after, ev_queue, ev_id, ev_time, ev_owner, ev_retry, ev_type, ev_data, ev_extra1, ev_extra2, ev_extra3, ev_extra4) values (x_retry_after, q.sub_queue, id, x_ev_time, q.sub_id, x_ev_retry, x_ev_type, x_ev_data, x_ev_extra1, x_ev_extra2, x_ev_extra3, x_ev_extra4); return id; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq/functions/pgq.find_tick_helper.sql0000644000000000000000000000455412426435645020520 0ustar create or replace function pgq.find_tick_helper( in i_queue_id int4, in i_prev_tick_id int8, in i_prev_tick_time timestamptz, in i_prev_tick_seq int8, in i_min_count int8, in i_min_interval interval, out next_tick_id int8, out next_tick_time timestamptz, out next_tick_seq int8) as $$ -- ---------------------------------------------------------------------- -- Function: pgq.find_tick_helper(6) -- -- Helper function for pgq.next_batch_custom() to do extended tick search. 
-- ---------------------------------------------------------------------- declare sure boolean; can_set boolean; t record; cnt int8; ival interval; begin -- first, fetch last tick of the queue select tick_id, tick_time, tick_event_seq into t from pgq.tick where tick_queue = i_queue_id and tick_id > i_prev_tick_id order by tick_queue desc, tick_id desc limit 1; if not found then return; end if; -- check whether batch would end up within reasonable limits sure := true; can_set := false; if i_min_count is not null then cnt = t.tick_event_seq - i_prev_tick_seq; if cnt >= i_min_count then can_set := true; end if; if cnt > i_min_count * 2 then sure := false; end if; end if; if i_min_interval is not null then ival = t.tick_time - i_prev_tick_time; if ival >= i_min_interval then can_set := true; end if; if ival > i_min_interval * 2 then sure := false; end if; end if; -- if last tick too far away, do large scan if not sure then select tick_id, tick_time, tick_event_seq into t from pgq.tick where tick_queue = i_queue_id and tick_id > i_prev_tick_id and ((i_min_count is not null and (tick_event_seq - i_prev_tick_seq) >= i_min_count) or (i_min_interval is not null and (tick_time - i_prev_tick_time) >= i_min_interval)) order by tick_queue asc, tick_id asc limit 1; can_set := true; end if; if can_set then next_tick_id := t.tick_id; next_tick_time := t.tick_time; next_tick_seq := t.tick_event_seq; end if; return; end; $$ language plpgsql stable; skytools-3.2.6/sql/pgq/functions/pgq.current_event_table.sql0000644000000000000000000000256012426435645021254 0ustar create or replace function pgq.current_event_table(x_queue_name text) returns text as $$ -- ---------------------------------------------------------------------- -- Function: pgq.current_event_table(1) -- -- Return active event table for particular queue. -- Event can be added to it without going via functions, -- e.g. by COPY. 
-- -- If queue is disabled and GUC session_replication_role <> 'replica' -- then raises exception. -- -- or expressed in a different way - an even table of a disabled queue -- is returned only on replica -- -- Note: -- The result is valid only during current transaction. -- -- Permissions: -- Actual insertion requires superuser access. -- -- Parameters: -- x_queue_name - Queue name. -- ---------------------------------------------------------------------- declare res text; disabled boolean; begin select queue_data_pfx || '_' || queue_cur_table::text, queue_disable_insert into res, disabled from pgq.queue where queue_name = x_queue_name; if not found then raise exception 'Event queue not found'; end if; if disabled then if current_setting('session_replication_role') <> 'replica' then raise exception 'Writing to queue disabled'; end if; end if; return res; end; $$ language plpgsql; -- no perms needed skytools-3.2.6/sql/pgq/functions/pgq.create_queue.sql0000644000000000000000000000517312426435645017674 0ustar create or replace function pgq.create_queue(i_queue_name text) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.create_queue(1) -- -- Creates new queue with given name. -- -- Returns: -- 0 - queue already exists -- 1 - queue created -- Calls: -- pgq.grant_perms(i_queue_name); -- pgq.ticker(i_queue_name); -- pgq.tune_storage(i_queue_name); -- Tables directly manipulated: -- insert - pgq.queue -- create - pgq.event_N () inherits (pgq.event_template) -- create - pgq.event_N_0 .. 
pgq.event_N_M () inherits (pgq.event_N) -- ---------------------------------------------------------------------- declare tblpfx text; tblname text; idxpfx text; idxname text; sql text; id integer; tick_seq text; ev_seq text; n_tables integer; begin if i_queue_name is null then raise exception 'Invalid NULL value'; end if; -- check if exists perform 1 from pgq.queue where queue_name = i_queue_name; if found then return 0; end if; -- insert event id := nextval('pgq.queue_queue_id_seq'); tblpfx := 'pgq.event_' || id::text; idxpfx := 'event_' || id::text; tick_seq := 'pgq.event_' || id::text || '_tick_seq'; ev_seq := 'pgq.event_' || id::text || '_id_seq'; insert into pgq.queue (queue_id, queue_name, queue_data_pfx, queue_event_seq, queue_tick_seq) values (id, i_queue_name, tblpfx, ev_seq, tick_seq); select queue_ntables into n_tables from pgq.queue where queue_id = id; -- create seqs execute 'CREATE SEQUENCE ' || pgq.quote_fqname(tick_seq); execute 'CREATE SEQUENCE ' || pgq.quote_fqname(ev_seq); -- create data tables execute 'CREATE TABLE ' || pgq.quote_fqname(tblpfx) || ' () ' || ' INHERITS (pgq.event_template)'; for i in 0 .. 
(n_tables - 1) loop tblname := tblpfx || '_' || i::text; idxname := idxpfx || '_' || i::text || '_txid_idx'; execute 'CREATE TABLE ' || pgq.quote_fqname(tblname) || ' () ' || ' INHERITS (' || pgq.quote_fqname(tblpfx) || ')'; execute 'ALTER TABLE ' || pgq.quote_fqname(tblname) || ' ALTER COLUMN ev_id ' || ' SET DEFAULT nextval(' || quote_literal(ev_seq) || ')'; execute 'create index ' || quote_ident(idxname) || ' on ' || pgq.quote_fqname(tblname) || ' (ev_txid)'; end loop; perform pgq.grant_perms(i_queue_name); perform pgq.ticker(i_queue_name); perform pgq.tune_storage(i_queue_name); return 1; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq/functions/pgq.maint_tables_to_vacuum.sql0000644000000000000000000000323212426435645021743 0ustar create or replace function pgq.maint_tables_to_vacuum() returns setof text as $$ -- ---------------------------------------------------------------------- -- Function: pgq.maint_tables_to_vacuum(0) -- -- Returns list of tablenames that need frequent vacuuming. -- -- The goal is to avoid hardcoding them into maintenance process. -- -- Returns: -- List of table names. -- ---------------------------------------------------------------------- declare scm text; tbl text; fqname text; begin -- assume autovacuum handles them fine if current_setting('autovacuum') = 'on' then return; end if; for scm, tbl in values ('pgq', 'subscription'), ('pgq', 'consumer'), ('pgq', 'queue'), ('pgq', 'tick'), ('pgq', 'retry_queue'), ('pgq_ext', 'completed_tick'), ('pgq_ext', 'completed_batch'), ('pgq_ext', 'completed_event'), ('pgq_ext', 'partial_batch'), --('pgq_node', 'node_location'), --('pgq_node', 'node_info'), ('pgq_node', 'local_state'), --('pgq_node', 'subscriber_info'), --('londiste', 'table_info'), ('londiste', 'seq_info'), --('londiste', 'applied_execute'), --('londiste', 'pending_fkeys'), ('txid', 'epoch'), ('londiste', 'completed') loop select n.nspname || '.' 
|| t.relname into fqname from pg_class t, pg_namespace n where n.oid = t.relnamespace and n.nspname = scm and t.relname = tbl; if found then return next fqname; end if; end loop; return; end; $$ language plpgsql; skytools-3.2.6/sql/pgq/functions/pgq.insert_event.sql0000644000000000000000000000357012426435645017731 0ustar create or replace function pgq.insert_event(queue_name text, ev_type text, ev_data text) returns bigint as $$ -- ---------------------------------------------------------------------- -- Function: pgq.insert_event(3) -- -- Insert a event into queue. -- -- Parameters: -- queue_name - Name of the queue -- ev_type - User-specified type for the event -- ev_data - User data for the event -- -- Returns: -- Event ID -- Calls: -- pgq.insert_event(7) -- ---------------------------------------------------------------------- begin return pgq.insert_event(queue_name, ev_type, ev_data, null, null, null, null); end; $$ language plpgsql; create or replace function pgq.insert_event( queue_name text, ev_type text, ev_data text, ev_extra1 text, ev_extra2 text, ev_extra3 text, ev_extra4 text) returns bigint as $$ -- ---------------------------------------------------------------------- -- Function: pgq.insert_event(7) -- -- Insert a event into queue with all the extra fields. 
-- -- Parameters: -- queue_name - Name of the queue -- ev_type - User-specified type for the event -- ev_data - User data for the event -- ev_extra1 - Extra data field for the event -- ev_extra2 - Extra data field for the event -- ev_extra3 - Extra data field for the event -- ev_extra4 - Extra data field for the event -- -- Returns: -- Event ID -- Calls: -- pgq.insert_event_raw(11) -- Tables directly manipulated: -- insert - pgq.insert_event_raw(11), a C function, inserts into current event_N_M table -- ---------------------------------------------------------------------- begin return pgq.insert_event_raw(queue_name, null, now(), null, null, ev_type, ev_data, ev_extra1, ev_extra2, ev_extra3, ev_extra4); end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq/functions/pgq.event_retry.sql0000644000000000000000000000460612426435645017573 0ustar create or replace function pgq.event_retry( x_batch_id bigint, x_event_id bigint, x_retry_time timestamptz) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.event_retry(3a) -- -- Put the event into retry queue, to be processed again later. -- -- Parameters: -- x_batch_id - ID of active batch. 
-- x_event_id - event id -- x_retry_time - Time when the event should be put back into queue -- -- Returns: -- 1 - success -- 0 - event already in retry queue -- Calls: -- None -- Tables directly manipulated: -- insert - pgq.retry_queue -- ---------------------------------------------------------------------- begin insert into pgq.retry_queue (ev_retry_after, ev_queue, ev_id, ev_time, ev_txid, ev_owner, ev_retry, ev_type, ev_data, ev_extra1, ev_extra2, ev_extra3, ev_extra4) select x_retry_time, sub_queue, ev_id, ev_time, NULL, sub_id, coalesce(ev_retry, 0) + 1, ev_type, ev_data, ev_extra1, ev_extra2, ev_extra3, ev_extra4 from pgq.get_batch_events(x_batch_id), pgq.subscription where sub_batch = x_batch_id and ev_id = x_event_id; if not found then raise exception 'event not found'; end if; return 1; -- dont worry if the event is already in queue exception when unique_violation then return 0; end; $$ language plpgsql security definer; create or replace function pgq.event_retry( x_batch_id bigint, x_event_id bigint, x_retry_seconds integer) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.event_retry(3b) -- -- Put the event into retry queue, to be processed later again. -- -- Parameters: -- x_batch_id - ID of active batch. 
-- x_event_id - event id -- x_retry_seconds - Time when the event should be put back into queue -- -- Returns: -- 1 - success -- 0 - event already in retry queue -- Calls: -- pgq.event_retry(3a) -- Tables directly manipulated: -- None -- ---------------------------------------------------------------------- declare new_retry timestamptz; begin new_retry := current_timestamp + ((x_retry_seconds::text || ' seconds')::interval); return pgq.event_retry(x_batch_id, x_event_id, new_retry); end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq/functions/pgq.get_batch_events.sql0000644000000000000000000000176112426435645020530 0ustar create or replace function pgq.get_batch_events( in x_batch_id bigint, out ev_id bigint, out ev_time timestamptz, out ev_txid bigint, out ev_retry int4, out ev_type text, out ev_data text, out ev_extra1 text, out ev_extra2 text, out ev_extra3 text, out ev_extra4 text) returns setof record as $$ -- ---------------------------------------------------------------------- -- Function: pgq.get_batch_events(1) -- -- Get all events in batch. -- -- Parameters: -- x_batch_id - ID of active batch. -- -- Returns: -- List of events. -- ---------------------------------------------------------------------- declare sql text; begin sql := pgq.batch_event_sql(x_batch_id); for ev_id, ev_time, ev_txid, ev_retry, ev_type, ev_data, ev_extra1, ev_extra2, ev_extra3, ev_extra4 in execute sql loop return next; end loop; return; end; $$ language plpgsql; -- no perms needed skytools-3.2.6/sql/pgq/functions/pgq.grant_perms.sql0000644000000000000000000000603112426435645017540 0ustar create or replace function pgq.grant_perms(x_queue_name text) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.grant_perms(1) -- -- Make event tables readable by public. -- -- Parameters: -- x_queue_name - Name of the queue. 
-- -- Returns: -- nothing -- ---------------------------------------------------------------------- declare q record; i integer; pos integer; tbl_perms text; seq_perms text; dst_schema text; dst_table text; part_table text; begin select * from pgq.queue into q where queue_name = x_queue_name; if not found then raise exception 'Queue not found'; end if; -- split data table name to components pos := position('.' in q.queue_data_pfx); if pos > 0 then dst_schema := substring(q.queue_data_pfx for pos - 1); dst_table := substring(q.queue_data_pfx from pos + 1); else dst_schema := 'public'; dst_table := q.queue_data_pfx; end if; -- tick seq, normal users don't need to modify it execute 'grant select on ' || pgq.quote_fqname(q.queue_tick_seq) || ' to public'; -- event seq execute 'grant select on ' || pgq.quote_fqname(q.queue_event_seq) || ' to public'; execute 'grant usage on ' || pgq.quote_fqname(q.queue_event_seq) || ' to pgq_admin'; -- set grants on parent table perform pgq._grant_perms_from('pgq', 'event_template', dst_schema, dst_table); -- set grants on real event tables for i in 0 .. q.queue_ntables - 1 loop part_table := dst_table || '_' || i::text; perform pgq._grant_perms_from('pgq', 'event_template', dst_schema, part_table); end loop; return 1; end; $$ language plpgsql security definer; create or replace function pgq._grant_perms_from(src_schema text, src_table text, dst_schema text, dst_table text) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.grant_perms_from(1) -- -- Copy grants from one table to another. -- Workaround for missing GRANTS option for CREATE TABLE LIKE. -- ---------------------------------------------------------------------- declare fq_table text; sql text; g record; q_grantee text; begin fq_table := quote_ident(dst_schema) || '.' 
|| quote_ident(dst_table); for g in select grantor, grantee, privilege_type, is_grantable from information_schema.table_privileges where table_schema = src_schema and table_name = src_table loop if g.grantee = 'PUBLIC' then q_grantee = 'public'; else q_grantee = quote_ident(g.grantee); end if; sql := 'grant ' || g.privilege_type || ' on ' || fq_table || ' to ' || q_grantee; if g.is_grantable = 'YES' then sql := sql || ' with grant option'; end if; execute sql; end loop; return 1; end; $$ language plpgsql strict; skytools-3.2.6/sql/pgq/functions/pgq.tune_storage.sql0000644000000000000000000000251612426435645017722 0ustar create or replace function pgq.tune_storage(i_queue_name text) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.tune_storage(1) -- -- Tunes storage settings for queue data tables -- ---------------------------------------------------------------------- declare tbl text; tbloid oid; q record; i int4; sql text; pgver int4; begin pgver := current_setting('server_version_num'); select * into q from pgq.queue where queue_name = i_queue_name; if not found then return 0; end if; for i in 0 .. 
(q.queue_ntables - 1) loop tbl := q.queue_data_pfx || '_' || i::text; -- set fillfactor sql := 'alter table ' || tbl || ' set (fillfactor = 100'; -- autovacuum for 8.4+ if pgver >= 80400 then sql := sql || ', autovacuum_enabled=off, toast.autovacuum_enabled =off'; end if; sql := sql || ')'; execute sql; -- autovacuum for 8.3 if pgver < 80400 then tbloid := tbl::regclass::oid; delete from pg_catalog.pg_autovacuum where vacrelid = tbloid; insert into pg_catalog.pg_autovacuum values (tbloid, false, -1,-1,-1,-1,-1,-1,-1,-1); end if; end loop; return 1; end; $$ language plpgsql strict; skytools-3.2.6/sql/pgq/functions/pgq.maint_rotate_tables.sql0000644000000000000000000000774012426435645021247 0ustar create or replace function pgq.maint_rotate_tables_step1(i_queue_name text) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.maint_rotate_tables_step1(1) -- -- Rotate tables for one queue. -- -- Parameters: -- i_queue_name - Name of the queue -- -- Returns: -- 0 -- ---------------------------------------------------------------------- declare badcnt integer; cf record; nr integer; tbl text; lowest_tick_id int8; lowest_xmin int8; begin -- check if needed and load record select * from pgq.queue into cf where queue_name = i_queue_name and queue_rotation_period is not null and queue_switch_step2 is not null and queue_switch_time + queue_rotation_period < current_timestamp for update; if not found then return 0; end if; -- if DB is in invalid state, stop if txid_current() < cf.queue_switch_step1 then raise exception 'queue % maint failure: step1=%, current=%', i_queue_name, cf.queue_switch_step1, txid_current(); end if; -- find lowest tick for that queue select min(sub_last_tick) into lowest_tick_id from pgq.subscription where sub_queue = cf.queue_id; -- if some consumer exists if lowest_tick_id is not null then -- is the slowest one still on previous table? 
select txid_snapshot_xmin(tick_snapshot) into lowest_xmin from pgq.tick where tick_queue = cf.queue_id and tick_id = lowest_tick_id; if not found then raise exception 'queue % maint failure: tick % not found', i_queue_name, lowest_tick_id; end if; if lowest_xmin <= cf.queue_switch_step2 then return 0; -- skip rotation then end if; end if; -- nobody on previous table, we can rotate -- calc next table number and name nr := cf.queue_cur_table + 1; if nr = cf.queue_ntables then nr := 0; end if; tbl := cf.queue_data_pfx || '_' || nr::text; -- there may be long lock on the table from pg_dump, -- detect it and skip rotate then begin execute 'lock table ' || pgq.quote_fqname(tbl) || ' nowait'; execute 'truncate ' || pgq.quote_fqname(tbl); exception when lock_not_available then -- cannot truncate, skipping rotate return 0; end; -- remember the moment update pgq.queue set queue_cur_table = nr, queue_switch_time = current_timestamp, queue_switch_step1 = txid_current(), queue_switch_step2 = NULL where queue_id = cf.queue_id; -- Clean ticks by using step2 txid from previous rotation. -- That should keep all ticks for all batches that are completely -- in old table. This keeps them for longer than needed, but: -- 1. we want the pgq.tick table to be big, to avoid Postgres -- accitentally switching to seqscans on that. -- 2. that way we guarantee to consumers that they an be moved -- back on the queue at least for one rotation_period. -- (may help in disaster recovery) delete from pgq.tick where tick_queue = cf.queue_id and txid_snapshot_xmin(tick_snapshot) < cf.queue_switch_step2; return 0; end; $$ language plpgsql; -- need admin access create or replace function pgq.maint_rotate_tables_step2() returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.maint_rotate_tables_step2(0) -- -- Stores the txid when the rotation was visible. 
It should be -- called in separate transaction than pgq.maint_rotate_tables_step1() -- ---------------------------------------------------------------------- begin update pgq.queue set queue_switch_step2 = txid_current() where queue_switch_step2 is null; return 0; end; $$ language plpgsql; -- need admin access skytools-3.2.6/sql/pgq/functions/pgq.batch_event_sql.sql0000644000000000000000000001230712426435645020363 0ustar create or replace function pgq.batch_event_sql(x_batch_id bigint) returns text as $$ -- ---------------------------------------------------------------------- -- Function: pgq.batch_event_sql(1) -- Creates SELECT statement that fetches events for this batch. -- -- Parameters: -- x_batch_id - ID of a active batch. -- -- Returns: -- SQL statement. -- ---------------------------------------------------------------------- -- ---------------------------------------------------------------------- -- Algorithm description: -- Given 2 snapshots, sn1 and sn2 with sn1 having xmin1, xmax1 -- and sn2 having xmin2, xmax2 create expression that filters -- right txid's from event table. -- -- Simplest solution would be -- > WHERE ev_txid >= xmin1 AND ev_txid <= xmax2 -- > AND NOT txid_visible_in_snapshot(ev_txid, sn1) -- > AND txid_visible_in_snapshot(ev_txid, sn2) -- -- The simple solution has a problem with long transactions (xmin1 very low). -- All the batches that happen when the long tx is active will need -- to scan all events in that range. Here is 2 optimizations used: -- -- 1) Use [xmax1..xmax2] for range scan. That limits the range to -- txids that actually happened between two snapshots. For txids -- in the range [xmin1..xmax1] look which ones were actually -- committed between snapshots and search for them using exact -- values using IN (..) list. -- -- 2) As most TX are short, there could be lot of them that were -- just below xmax1, but were committed before xmax2. 
So look -- if there are ID's near xmax1 and lower the range to include -- them, thus decresing size of IN (..) list. -- ---------------------------------------------------------------------- declare rec record; sql text; tbl text; arr text; part text; select_fields text; retry_expr text; batch record; begin select s.sub_last_tick, s.sub_next_tick, s.sub_id, s.sub_queue, txid_snapshot_xmax(last.tick_snapshot) as tx_start, txid_snapshot_xmax(cur.tick_snapshot) as tx_end, last.tick_snapshot as last_snapshot, cur.tick_snapshot as cur_snapshot into batch from pgq.subscription s, pgq.tick last, pgq.tick cur where s.sub_batch = x_batch_id and last.tick_queue = s.sub_queue and last.tick_id = s.sub_last_tick and cur.tick_queue = s.sub_queue and cur.tick_id = s.sub_next_tick; if not found then raise exception 'batch not found'; end if; -- load older transactions arr := ''; for rec in -- active tx-es in prev_snapshot that were committed in cur_snapshot select id1 from txid_snapshot_xip(batch.last_snapshot) id1 left join txid_snapshot_xip(batch.cur_snapshot) id2 on (id1 = id2) where id2 is null order by 1 desc loop -- try to avoid big IN expression, so try to include nearby -- tx'es into range if batch.tx_start - 100 <= rec.id1 then batch.tx_start := rec.id1; else if arr = '' then arr := rec.id1::text; else arr := arr || ',' || rec.id1::text; end if; end if; end loop; -- must match pgq.event_template select_fields := 'select ev_id, ev_time, ev_txid, ev_retry, ev_type,' || ' ev_data, ev_extra1, ev_extra2, ev_extra3, ev_extra4'; retry_expr := ' and (ev_owner is null or ev_owner = ' || batch.sub_id::text || ')'; -- now generate query that goes over all potential tables sql := ''; for rec in select xtbl from pgq.batch_event_tables(x_batch_id) xtbl loop tbl := pgq.quote_fqname(rec.xtbl); -- this gets newer queries that definitely are not in prev_snapshot part := select_fields || ' from pgq.tick cur, pgq.tick last, ' || tbl || ' ev ' || ' where cur.tick_id = ' || 
batch.sub_next_tick::text || ' and cur.tick_queue = ' || batch.sub_queue::text || ' and last.tick_id = ' || batch.sub_last_tick::text || ' and last.tick_queue = ' || batch.sub_queue::text || ' and ev.ev_txid >= ' || batch.tx_start::text || ' and ev.ev_txid <= ' || batch.tx_end::text || ' and txid_visible_in_snapshot(ev.ev_txid, cur.tick_snapshot)' || ' and not txid_visible_in_snapshot(ev.ev_txid, last.tick_snapshot)' || retry_expr; -- now include older tx-es, that were ongoing -- at the time of prev_snapshot if arr <> '' then part := part || ' union all ' || select_fields || ' from ' || tbl || ' ev ' || ' where ev.ev_txid in (' || arr || ')' || retry_expr; end if; if sql = '' then sql := part; else sql := sql || ' union all ' || part; end if; end loop; if sql = '' then raise exception 'could not construct sql for batch %', x_batch_id; end if; return sql || ' order by 1'; end; $$ language plpgsql; -- no perms needed skytools-3.2.6/sql/pgq/functions/pgq.next_batch.sql0000644000000000000000000001740012426435645017340 0ustar create or replace function pgq.next_batch_info( in i_queue_name text, in i_consumer_name text, out batch_id int8, out cur_tick_id int8, out prev_tick_id int8, out cur_tick_time timestamptz, out prev_tick_time timestamptz, out cur_tick_event_seq int8, out prev_tick_event_seq int8) as $$ -- ---------------------------------------------------------------------- -- Function: pgq.next_batch_info(2) -- -- Makes next block of events active. -- -- If it returns NULL, there is no events available in queue. -- Consumer should sleep then. -- -- The values from event_id sequence may give hint how big the -- batch may be. But they are inexact, they do not give exact size. -- Client *MUST NOT* use them to detect whether the batch contains any -- events at all - the values are unfit for that purpose. 
-- -- Parameters: -- i_queue_name - Name of the queue -- i_consumer_name - Name of the consumer -- -- Returns: -- batch_id - Batch ID or NULL if there are no more events available. -- cur_tick_id - End tick id. -- cur_tick_time - End tick time. -- cur_tick_event_seq - Value from event id sequence at the time tick was issued. -- prev_tick_id - Start tick id. -- prev_tick_time - Start tick time. -- prev_tick_event_seq - value from event id sequence at the time tick was issued. -- Calls: -- pgq.next_batch_custom(5) -- Tables directly manipulated: -- None -- ---------------------------------------------------------------------- begin select f.batch_id, f.cur_tick_id, f.prev_tick_id, f.cur_tick_time, f.prev_tick_time, f.cur_tick_event_seq, f.prev_tick_event_seq into batch_id, cur_tick_id, prev_tick_id, cur_tick_time, prev_tick_time, cur_tick_event_seq, prev_tick_event_seq from pgq.next_batch_custom(i_queue_name, i_consumer_name, NULL, NULL, NULL) f; return; end; $$ language plpgsql; create or replace function pgq.next_batch( in i_queue_name text, in i_consumer_name text) returns int8 as $$ -- ---------------------------------------------------------------------- -- Function: pgq.next_batch(2) -- -- Old function that returns just batch_id. -- -- Parameters: -- i_queue_name - Name of the queue -- i_consumer_name - Name of the consumer -- -- Returns: -- Batch ID or NULL if there are no more events available. 
-- ---------------------------------------------------------------------- declare res int8; begin select batch_id into res from pgq.next_batch_info(i_queue_name, i_consumer_name); return res; end; $$ language plpgsql; create or replace function pgq.next_batch_custom( in i_queue_name text, in i_consumer_name text, in i_min_lag interval, in i_min_count int4, in i_min_interval interval, out batch_id int8, out cur_tick_id int8, out prev_tick_id int8, out cur_tick_time timestamptz, out prev_tick_time timestamptz, out cur_tick_event_seq int8, out prev_tick_event_seq int8) as $$ -- ---------------------------------------------------------------------- -- Function: pgq.next_batch_custom(5) -- -- Makes next block of events active. Block size can be tuned -- with i_min_count, i_min_interval parameters. Events age can -- be tuned with i_min_lag. -- -- If it returns NULL, there is no events available in queue. -- Consumer should sleep then. -- -- The values from event_id sequence may give hint how big the -- batch may be. But they are inexact, they do not give exact size. -- Client *MUST NOT* use them to detect whether the batch contains any -- events at all - the values are unfit for that purpose. -- -- Note: -- i_min_lag together with i_min_interval/i_min_count is inefficient. -- -- Parameters: -- i_queue_name - Name of the queue -- i_consumer_name - Name of the consumer -- i_min_lag - Consumer wants events older than that -- i_min_count - Consumer wants batch to contain at least this many events -- i_min_interval - Consumer wants batch to cover at least this much time -- -- Returns: -- batch_id - Batch ID or NULL if there are no more events available. -- cur_tick_id - End tick id. -- cur_tick_time - End tick time. -- cur_tick_event_seq - Value from event id sequence at the time tick was issued. -- prev_tick_id - Start tick id. -- prev_tick_time - Start tick time. -- prev_tick_event_seq - value from event id sequence at the time tick was issued. 
-- Calls: -- pgq.insert_event_raw(11) -- Tables directly manipulated: -- update - pgq.subscription -- ---------------------------------------------------------------------- declare errmsg text; queue_id integer; sub_id integer; cons_id integer; begin select s.sub_queue, s.sub_consumer, s.sub_id, s.sub_batch, t1.tick_id, t1.tick_time, t1.tick_event_seq, t2.tick_id, t2.tick_time, t2.tick_event_seq into queue_id, cons_id, sub_id, batch_id, prev_tick_id, prev_tick_time, prev_tick_event_seq, cur_tick_id, cur_tick_time, cur_tick_event_seq from pgq.consumer c, pgq.queue q, pgq.subscription s left join pgq.tick t1 on (t1.tick_queue = s.sub_queue and t1.tick_id = s.sub_last_tick) left join pgq.tick t2 on (t2.tick_queue = s.sub_queue and t2.tick_id = s.sub_next_tick) where q.queue_name = i_queue_name and c.co_name = i_consumer_name and s.sub_queue = q.queue_id and s.sub_consumer = c.co_id; if not found then errmsg := 'Not subscriber to queue: ' || coalesce(i_queue_name, 'NULL') || '/' || coalesce(i_consumer_name, 'NULL'); raise exception '%', errmsg; end if; -- sanity check if prev_tick_id is null then raise exception 'PgQ corruption: Consumer % on queue % does not see tick %', i_consumer_name, i_queue_name, prev_tick_id; end if; -- has already active batch if batch_id is not null then return; end if; if i_min_interval is null and i_min_count is null then -- find next tick select tick_id, tick_time, tick_event_seq into cur_tick_id, cur_tick_time, cur_tick_event_seq from pgq.tick where tick_id > prev_tick_id and tick_queue = queue_id order by tick_queue asc, tick_id asc limit 1; else -- find custom tick select next_tick_id, next_tick_time, next_tick_seq into cur_tick_id, cur_tick_time, cur_tick_event_seq from pgq.find_tick_helper(queue_id, prev_tick_id, prev_tick_time, prev_tick_event_seq, i_min_count, i_min_interval); end if; if i_min_lag is not null then -- enforce min lag if now() - cur_tick_time < i_min_lag then cur_tick_id := NULL; cur_tick_time := NULL; 
cur_tick_event_seq := NULL; end if; end if; if cur_tick_id is null then -- nothing to do prev_tick_id := null; prev_tick_time := null; prev_tick_event_seq := null; return; end if; -- get next batch batch_id := nextval('pgq.batch_id_seq'); update pgq.subscription set sub_batch = batch_id, sub_next_tick = cur_tick_id, sub_active = now() where sub_queue = queue_id and sub_consumer = cons_id; return; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq/functions/pgq.drop_queue.sql0000644000000000000000000000477312426435645017402 0ustar create or replace function pgq.drop_queue(x_queue_name text, x_force bool) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.drop_queue(2) -- -- Drop queue and all associated tables. -- -- Parameters: -- x_queue_name - queue name -- x_force - ignore (drop) existing consumers -- Returns: -- 1 - success -- Calls: -- pgq.unregister_consumer(queue_name, consumer_name) -- perform pgq.ticker(i_queue_name); -- perform pgq.tune_storage(i_queue_name); -- Tables directly manipulated: -- delete - pgq.queue -- drop - pgq.event_N (), pgq.event_N_0 .. pgq.event_N_M -- ---------------------------------------------------------------------- declare tblname text; q record; num integer; begin -- check if exists select * into q from pgq.queue where queue_name = x_queue_name for update; if not found then raise exception 'No such event queue'; end if; if x_force then perform pgq.unregister_consumer(queue_name, consumer_name) from pgq.get_consumer_info(x_queue_name); else -- check if no consumers select count(*) into num from pgq.subscription where sub_queue = q.queue_id; if num > 0 then raise exception 'cannot drop queue, consumers still attached'; end if; end if; -- drop data tables for i in 0 .. 
(q.queue_ntables - 1) loop tblname := q.queue_data_pfx || '_' || i::text; execute 'DROP TABLE ' || pgq.quote_fqname(tblname); end loop; execute 'DROP TABLE ' || pgq.quote_fqname(q.queue_data_pfx); -- delete ticks delete from pgq.tick where tick_queue = q.queue_id; -- drop seqs -- FIXME: any checks needed here? execute 'DROP SEQUENCE ' || pgq.quote_fqname(q.queue_tick_seq); execute 'DROP SEQUENCE ' || pgq.quote_fqname(q.queue_event_seq); -- delete event delete from pgq.queue where queue_name = x_queue_name; return 1; end; $$ language plpgsql security definer; create or replace function pgq.drop_queue(x_queue_name text) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.drop_queue(1) -- -- Drop queue and all associated tables. -- No consumers must be listening on the queue. -- -- ---------------------------------------------------------------------- begin return pgq.drop_queue(x_queue_name, false); end; $$ language plpgsql strict; skytools-3.2.6/sql/pgq/functions/pgq.get_batch_cursor.sql0000644000000000000000000000652612426435645020545 0ustar create or replace function pgq.get_batch_cursor( in i_batch_id bigint, in i_cursor_name text, in i_quick_limit int4, in i_extra_where text, out ev_id bigint, out ev_time timestamptz, out ev_txid bigint, out ev_retry int4, out ev_type text, out ev_data text, out ev_extra1 text, out ev_extra2 text, out ev_extra3 text, out ev_extra4 text) returns setof record as $$ -- ---------------------------------------------------------------------- -- Function: pgq.get_batch_cursor(4) -- -- Get events in batch using a cursor. -- -- Parameters: -- i_batch_id - ID of active batch. -- i_cursor_name - Name for new cursor -- i_quick_limit - Number of events to return immediately -- i_extra_where - optional where clause to filter events -- -- Returns: -- List of events. 
-- Calls: -- pgq.batch_event_sql(i_batch_id) - internal function which generates SQL optimised specially for getting events in this batch -- ---------------------------------------------------------------------- declare _cname text; _sql text; begin if i_batch_id is null or i_cursor_name is null or i_quick_limit is null then return; end if; _cname := quote_ident(i_cursor_name); _sql := pgq.batch_event_sql(i_batch_id); -- apply extra where if i_extra_where is not null then _sql := replace(_sql, ' order by 1', ''); _sql := 'select * from (' || _sql || ') _evs where ' || i_extra_where || ' order by 1'; end if; -- create cursor execute 'declare ' || _cname || ' no scroll cursor for ' || _sql; -- if no events wanted, don't bother with execute if i_quick_limit <= 0 then return; end if; -- return first block of events for ev_id, ev_time, ev_txid, ev_retry, ev_type, ev_data, ev_extra1, ev_extra2, ev_extra3, ev_extra4 in execute 'fetch ' || i_quick_limit::text || ' from ' || _cname loop return next; end loop; return; end; $$ language plpgsql; -- no perms needed create or replace function pgq.get_batch_cursor( in i_batch_id bigint, in i_cursor_name text, in i_quick_limit int4, out ev_id bigint, out ev_time timestamptz, out ev_txid bigint, out ev_retry int4, out ev_type text, out ev_data text, out ev_extra1 text, out ev_extra2 text, out ev_extra3 text, out ev_extra4 text) returns setof record as $$ -- ---------------------------------------------------------------------- -- Function: pgq.get_batch_cursor(3) -- -- Get events in batch using a cursor. -- -- Parameters: -- i_batch_id - ID of active batch. -- i_cursor_name - Name for new cursor -- i_quick_limit - Number of events to return immediately -- -- Returns: -- List of events. 
-- Calls: -- pgq.get_batch_cursor(4) -- ---------------------------------------------------------------------- begin for ev_id, ev_time, ev_txid, ev_retry, ev_type, ev_data, ev_extra1, ev_extra2, ev_extra3, ev_extra4 in select * from pgq.get_batch_cursor(i_batch_id, i_cursor_name, i_quick_limit, null) loop return next; end loop; return; end; $$ language plpgsql strict; -- no perms needed skytools-3.2.6/sql/pgq/functions/pgq.unregister_consumer.sql0000644000000000000000000000454012426435645021324 0ustar create or replace function pgq.unregister_consumer( x_queue_name text, x_consumer_name text) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.unregister_consumer(2) -- -- Unsubscribe consumer from the queue. -- Also consumer's retry events are deleted. -- -- Parameters: -- x_queue_name - Name of the queue -- x_consumer_name - Name of the consumer -- -- Returns: -- number of (sub)consumers unregistered -- Calls: -- None -- Tables directly manipulated: -- delete - pgq.retry_queue -- delete - pgq.subscription -- ---------------------------------------------------------------------- declare x_sub_id integer; _sub_id_cnt integer; _consumer_id integer; _is_subconsumer boolean; begin select s.sub_id, c.co_id, -- subconsumers can only have both null or both not null - main consumer for subconsumers has only one not null (s.sub_last_tick IS NULL AND s.sub_next_tick IS NULL) OR (s.sub_last_tick IS NOT NULL AND s.sub_next_tick IS NOT NULL) into x_sub_id, _consumer_id, _is_subconsumer from pgq.subscription s, pgq.consumer c, pgq.queue q where s.sub_queue = q.queue_id and s.sub_consumer = c.co_id and q.queue_name = x_queue_name and c.co_name = x_consumer_name for update of s, c; if not found then return 0; end if; -- consumer + subconsumer count select count(*) into _sub_id_cnt from pgq.subscription where sub_id = x_sub_id; -- delete only one subconsumer if _sub_id_cnt > 1 and _is_subconsumer then delete from 
pgq.subscription where sub_id = x_sub_id and sub_consumer = _consumer_id; return 1; else -- delete main consumer (including possible subconsumers) -- retry events delete from pgq.retry_queue where ev_owner = x_sub_id; -- this will drop subconsumers too delete from pgq.subscription where sub_id = x_sub_id; perform 1 from pgq.subscription where sub_consumer = _consumer_id; if not found then delete from pgq.consumer where co_id = _consumer_id; end if; return _sub_id_cnt; end if; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq/functions/pgq.set_queue_config.sql0000644000000000000000000000326112426435645020545 0ustar create or replace function pgq.set_queue_config( x_queue_name text, x_param_name text, x_param_value text) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.set_queue_config(3) -- -- -- Set configuration for specified queue. -- -- Parameters: -- x_queue_name - Name of the queue to configure. -- x_param_name - Configuration parameter name. -- x_param_value - Configuration parameter value. -- -- Returns: -- 0 if event was already in queue, 1 otherwise. 
-- Calls: -- None -- Tables directly manipulated: -- update - pgq.queue -- ---------------------------------------------------------------------- declare v_param_name text; begin -- discard NULL input if x_queue_name is null or x_param_name is null then raise exception 'Invalid NULL value'; end if; -- check if queue exists perform 1 from pgq.queue where queue_name = x_queue_name; if not found then raise exception 'No such event queue'; end if; -- check if valid parameter name v_param_name := 'queue_' || x_param_name; if v_param_name not in ( 'queue_ticker_max_count', 'queue_ticker_max_lag', 'queue_ticker_idle_period', 'queue_ticker_paused', 'queue_rotation_period', 'queue_external_ticker') then raise exception 'cannot change parameter "%s"', x_param_name; end if; execute 'update pgq.queue set ' || v_param_name || ' = ' || quote_literal(x_param_value) || ' where queue_name = ' || quote_literal(x_queue_name); return 1; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq/functions/pgq.version.sql0000644000000000000000000000064512426435645016711 0ustar create or replace function pgq.version() returns text as $$ -- ---------------------------------------------------------------------- -- Function: pgq.version(0) -- -- Returns version string for pgq. ATM it is based on SkyTools -- version and only bumped when database code changes. -- ---------------------------------------------------------------------- begin return '3.2.6'; end; $$ language plpgsql; skytools-3.2.6/sql/pgq/functions/pgq.finish_batch.sql0000644000000000000000000000170012426435645017636 0ustar create or replace function pgq.finish_batch( x_batch_id bigint) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.finish_batch(1) -- -- Closes a batch. No more operations can be done with events -- of this batch. -- -- Parameters: -- x_batch_id - id of batch. -- -- Returns: -- 1 if batch was found, 0 otherwise. 
-- Calls: -- None -- Tables directly manipulated: -- update - pgq.subscription -- ---------------------------------------------------------------------- begin update pgq.subscription set sub_active = now(), sub_last_tick = sub_next_tick, sub_next_tick = null, sub_batch = null where sub_batch = x_batch_id; if not found then raise warning 'finish_batch: batch % not found', x_batch_id; return 0; end if; return 1; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq/functions/pgq.force_tick.sql0000644000000000000000000000270612426435645017334 0ustar create or replace function pgq.force_tick(i_queue_name text) returns bigint as $$ -- ---------------------------------------------------------------------- -- Function: pgq.force_tick(2) -- -- Simulate lots of events happening to force ticker to tick. -- -- Should be called in loop, with some delay until last tick -- changes or too much time is passed. -- -- Such function is needed because paraller calls of pgq.ticker() are -- dangerous, and cannot be protected with locks as snapshot -- is taken before locking. -- -- Parameters: -- i_queue_name - Name of the queue -- -- Returns: -- Currently last tick id. 
-- ---------------------------------------------------------------------- declare q record; t record; begin -- bump seq and get queue id select queue_id, setval(queue_event_seq, nextval(queue_event_seq) + queue_ticker_max_count * 2 + 1000) as tmp into q from pgq.queue where queue_name = i_queue_name and not queue_external_ticker and not queue_ticker_paused; --if not found then -- raise notice 'queue not found or ticks not allowed'; --end if; -- return last tick id select tick_id into t from pgq.tick, pgq.queue where tick_queue = queue_id and queue_name = i_queue_name order by tick_queue desc, tick_id desc limit 1; return t.tick_id; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq/functions/pgq.get_consumer_info.sql0000644000000000000000000001075312426435645020732 0ustar create or replace function pgq.get_consumer_info( out queue_name text, out consumer_name text, out lag interval, out last_seen interval, out last_tick bigint, out current_batch bigint, out next_tick bigint, out pending_events bigint) returns setof record as $$ -- ---------------------------------------------------------------------- -- Function: pgq.get_consumer_info(0) -- -- Returns info about all consumers on all queues. 
-- -- Returns: -- See pgq.get_consumer_info(2) -- ---------------------------------------------------------------------- begin for queue_name, consumer_name, lag, last_seen, last_tick, current_batch, next_tick, pending_events in select f.queue_name, f.consumer_name, f.lag, f.last_seen, f.last_tick, f.current_batch, f.next_tick, f.pending_events from pgq.get_consumer_info(null, null) f loop return next; end loop; return; end; $$ language plpgsql security definer; create or replace function pgq.get_consumer_info( in i_queue_name text, out queue_name text, out consumer_name text, out lag interval, out last_seen interval, out last_tick bigint, out current_batch bigint, out next_tick bigint, out pending_events bigint) returns setof record as $$ -- ---------------------------------------------------------------------- -- Function: pgq.get_consumer_info(1) -- -- Returns info about all consumers on single queue. -- -- Returns: -- See pgq.get_consumer_info(2) -- ---------------------------------------------------------------------- begin for queue_name, consumer_name, lag, last_seen, last_tick, current_batch, next_tick, pending_events in select f.queue_name, f.consumer_name, f.lag, f.last_seen, f.last_tick, f.current_batch, f.next_tick, f.pending_events from pgq.get_consumer_info(i_queue_name, null) f loop return next; end loop; return; end; $$ language plpgsql security definer; create or replace function pgq.get_consumer_info( in i_queue_name text, in i_consumer_name text, out queue_name text, out consumer_name text, out lag interval, out last_seen interval, out last_tick bigint, out current_batch bigint, out next_tick bigint, out pending_events bigint) returns setof record as $$ -- ---------------------------------------------------------------------- -- Function: pgq.get_consumer_info(2) -- -- Get info about particular consumer on particular queue. -- -- Parameters: -- i_queue_name - name of a queue. 
(null = all) -- i_consumer_name - name of a consumer (null = all) -- -- Returns: -- queue_name - Queue name -- consumer_name - Consumer name -- lag - How old are events the consumer is processing -- last_seen - When the consumer seen by pgq -- last_tick - Tick ID of last processed tick -- current_batch - Current batch ID, if one is active or NULL -- next_tick - If batch is active, then its final tick. -- ---------------------------------------------------------------------- declare _pending_events bigint; _queue_id bigint; begin for queue_name, consumer_name, lag, last_seen, last_tick, current_batch, next_tick, _pending_events, _queue_id in select q.queue_name, c.co_name, current_timestamp - t.tick_time, current_timestamp - s.sub_active, s.sub_last_tick, s.sub_batch, s.sub_next_tick, t.tick_event_seq, q.queue_id from pgq.queue q, pgq.consumer c, pgq.subscription s left join pgq.tick t on (t.tick_queue = s.sub_queue and t.tick_id = s.sub_last_tick) where q.queue_id = s.sub_queue and c.co_id = s.sub_consumer and (i_queue_name is null or q.queue_name = i_queue_name) and (i_consumer_name is null or c.co_name = i_consumer_name) order by 1,2 loop select t.tick_event_seq - _pending_events into pending_events from pgq.tick t where t.tick_queue = _queue_id order by t.tick_queue desc, t.tick_id desc limit 1; return next; end loop; return; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq/functions/pgq.ticker.sql0000644000000000000000000001257212426435645016507 0ustar create or replace function pgq.ticker(i_queue_name text, i_tick_id bigint, i_orig_timestamp timestamptz, i_event_seq bigint) returns bigint as $$ -- ---------------------------------------------------------------------- -- Function: pgq.ticker(3) -- -- External ticker: Insert a tick with a particular tick_id and timestamp. -- -- Parameters: -- i_queue_name - Name of the queue -- i_tick_id - Id of new tick. -- -- Returns: -- Tick id. 
-- ---------------------------------------------------------------------- begin insert into pgq.tick (tick_queue, tick_id, tick_time, tick_event_seq) select queue_id, i_tick_id, i_orig_timestamp, i_event_seq from pgq.queue where queue_name = i_queue_name and queue_external_ticker and not queue_ticker_paused; if not found then raise exception 'queue not found or ticker disabled: %', i_queue_name; end if; -- make sure seqs stay current perform pgq.seq_setval(queue_tick_seq, i_tick_id), pgq.seq_setval(queue_event_seq, i_event_seq) from pgq.queue where queue_name = i_queue_name; return i_tick_id; end; $$ language plpgsql security definer; -- unsure about access create or replace function pgq.ticker(i_queue_name text) returns bigint as $$ -- ---------------------------------------------------------------------- -- Function: pgq.ticker(1) -- -- Check if tick is needed for the queue and insert it. -- -- For pgqadm usage. -- -- Parameters: -- i_queue_name - Name of the queue -- -- Returns: -- Tick id or NULL if no tick was done. 
-- ---------------------------------------------------------------------- declare res bigint; q record; state record; last2 record; begin select queue_id, queue_tick_seq, queue_external_ticker, queue_ticker_max_count, queue_ticker_max_lag, queue_ticker_idle_period, queue_event_seq, pgq.seq_getval(queue_event_seq) as event_seq, queue_ticker_paused into q from pgq.queue where queue_name = i_queue_name; if not found then raise exception 'no such queue'; end if; if q.queue_external_ticker then raise exception 'This queue has external tick source.'; end if; if q.queue_ticker_paused then raise exception 'Ticker has been paused for this queue'; end if; -- load state from last tick select now() - tick_time as lag, q.event_seq - tick_event_seq as new_events, tick_id, tick_time, tick_event_seq, txid_snapshot_xmax(tick_snapshot) as sxmax, txid_snapshot_xmin(tick_snapshot) as sxmin into state from pgq.tick where tick_queue = q.queue_id order by tick_queue desc, tick_id desc limit 1; if found then if state.sxmin > txid_current() then raise exception 'Invalid PgQ state: old xmin=%, old xmax=%, cur txid=%', state.sxmin, state.sxmax, txid_current(); end if; if state.new_events < 0 then raise warning 'Negative new_events? old=% cur=%', state.tick_event_seq, q.event_seq; end if; if state.sxmax > txid_current() then raise warning 'Dubious PgQ state: old xmax=%, cur txid=%', state.sxmax, txid_current(); end if; if state.new_events > 0 then -- there are new events, should we wait a bit? if state.new_events < q.queue_ticker_max_count and state.lag < q.queue_ticker_max_lag then return NULL; end if; else -- no new events, should we apply idle period? -- check previous event from the last one. 
select state.tick_time - tick_time as lag into last2 from pgq.tick where tick_queue = q.queue_id and tick_id < state.tick_id order by tick_queue desc, tick_id desc limit 1; if found then -- gradually decrease the tick frequency if (state.lag < q.queue_ticker_max_lag / 2) or (state.lag < last2.lag * 2 and state.lag < q.queue_ticker_idle_period) then return NULL; end if; end if; end if; end if; insert into pgq.tick (tick_queue, tick_id, tick_event_seq) values (q.queue_id, nextval(q.queue_tick_seq), q.event_seq); return currval(q.queue_tick_seq); end; $$ language plpgsql security definer; -- unsure about access create or replace function pgq.ticker() returns bigint as $$ -- ---------------------------------------------------------------------- -- Function: pgq.ticker(0) -- -- Creates ticks for all unpaused queues which dont have external ticker. -- -- Returns: -- Number of queues that were processed. -- ---------------------------------------------------------------------- declare res bigint; q record; begin res := 0; for q in select queue_name from pgq.queue where not queue_external_ticker and not queue_ticker_paused order by queue_name loop if pgq.ticker(q.queue_name) > 0 then res := res + 1; end if; end loop; return res; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq/functions/pgq.quote_fqname.sql0000644000000000000000000000162112426435645017703 0ustar create or replace function pgq.quote_fqname(i_name text) returns text as $$ -- ---------------------------------------------------------------------- -- Function: pgq.quote_fqname(1) -- -- Quete fully-qualified object name for SQL. -- -- First dot is taken as schema separator. -- -- If schema is missing, 'public' is assumed. -- -- Parameters: -- i_name - fully qualified object name. -- -- Returns: -- Quoted name. -- ---------------------------------------------------------------------- declare res text; pos integer; s text; n text; begin pos := position('.' 
in i_name); if pos > 0 then s := substring(i_name for pos - 1); n := substring(i_name from pos + 1); else s := 'public'; n := i_name; end if; return quote_ident(s) || '.' || quote_ident(n); end; $$ language plpgsql strict immutable; skytools-3.2.6/sql/pgq/functions/pgq.maint_operations.sql0000644000000000000000000000724412426435645020601 0ustar create or replace function pgq.maint_operations(out func_name text, out func_arg text) returns setof record as $$ -- ---------------------------------------------------------------------- -- Function: pgq.maint_operations(0) -- -- Returns list of functions to call for maintenance. -- -- The goal is to avoid hardcoding them into maintenance process. -- -- Function signature: -- Function should take either 1 or 0 arguments and return 1 if it wants -- to be called immediately again, 0 if not. -- -- Returns: -- func_name - Function to call -- func_arg - Optional argument to function (queue name) -- ---------------------------------------------------------------------- declare ops text[]; nrot int4; begin -- rotate step 1 nrot := 0; func_name := 'pgq.maint_rotate_tables_step1'; for func_arg in select queue_name from pgq.queue where queue_rotation_period is not null and queue_switch_step2 is not null and queue_switch_time + queue_rotation_period < current_timestamp order by 1 loop nrot := nrot + 1; return next; end loop; -- rotate step 2 if nrot = 0 then select count(1) from pgq.queue where queue_rotation_period is not null and queue_switch_step2 is null into nrot; end if; if nrot > 0 then func_name := 'pgq.maint_rotate_tables_step2'; func_arg := NULL; return next; end if; -- check if extra field exists perform 1 from pg_attribute where attrelid = 'pgq.queue'::regclass and attname = 'queue_extra_maint'; if found then -- add extra ops for func_arg, ops in select q.queue_name, queue_extra_maint from pgq.queue q where queue_extra_maint is not null order by 1 loop for i in array_lower(ops, 1) .. 
array_upper(ops, 1) loop func_name = ops[i]; return next; end loop; end loop; end if; -- vacuum tables func_name := 'vacuum'; for func_arg in select * from pgq.maint_tables_to_vacuum() loop return next; end loop; -- -- pgq_node & londiste -- -- although they belong to queue_extra_maint, they are -- common enough so its more effective to handle them here. -- perform 1 from pg_proc p, pg_namespace n where p.pronamespace = n.oid and n.nspname = 'pgq_node' and p.proname = 'maint_watermark'; if found then func_name := 'pgq_node.maint_watermark'; for func_arg in select n.queue_name from pgq_node.node_info n where n.node_type = 'root' loop return next; end loop; end if; perform 1 from pg_proc p, pg_namespace n where p.pronamespace = n.oid and n.nspname = 'londiste' and p.proname = 'root_check_seqs'; if found then func_name := 'londiste.root_check_seqs'; for func_arg in select distinct s.queue_name from londiste.seq_info s, pgq_node.node_info n where s.local and n.node_type = 'root' and n.queue_name = s.queue_name loop return next; end loop; end if; perform 1 from pg_proc p, pg_namespace n where p.pronamespace = n.oid and n.nspname = 'londiste' and p.proname = 'periodic_maintenance'; if found then func_name := 'londiste.periodic_maintenance'; func_arg := NULL; return next; end if; return; end; $$ language plpgsql; skytools-3.2.6/sql/pgq/functions/pgq.seq_funcs.sql0000644000000000000000000000324112426435645017205 0ustar create or replace function pgq.seq_getval(i_seq_name text) returns bigint as $$ -- ---------------------------------------------------------------------- -- Function: pgq.seq_getval(1) -- -- Read current last_val from seq, without affecting it. -- -- Parameters: -- i_seq_name - Name of the sequence -- -- Returns: -- last value. -- ---------------------------------------------------------------------- declare res int8; fqname text; pos integer; s text; n text; begin pos := position('.' 
in i_seq_name); if pos > 0 then s := substring(i_seq_name for pos - 1); n := substring(i_seq_name from pos + 1); else s := 'public'; n := i_seq_name; end if; fqname := quote_ident(s) || '.' || quote_ident(n); execute 'select last_value from ' || fqname into res; return res; end; $$ language plpgsql strict; create or replace function pgq.seq_setval(i_seq_name text, i_new_value int8) returns bigint as $$ -- ---------------------------------------------------------------------- -- Function: pgq.seq_setval(2) -- -- Like setval() but does not allow going back. -- -- Parameters: -- i_seq_name - Name of the sequence -- i_new_value - new value -- -- Returns: -- current last value. -- ---------------------------------------------------------------------- declare res int8; fqname text; begin fqname := pgq.quote_fqname(i_seq_name); res := pgq.seq_getval(i_seq_name); if res < i_new_value then perform setval(fqname, i_new_value); return i_new_value; end if; return res; end; $$ language plpgsql strict; skytools-3.2.6/sql/pgq/functions/pgq.maint_retry_events.sql0000644000000000000000000000271012426435645021140 0ustar create or replace function pgq.maint_retry_events() returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.maint_retry_events(0) -- -- Moves retry events back to main queue. -- -- It moves small amount at a time. It should be called -- until it returns 0 -- -- Returns: -- Number of events processed. 
-- ---------------------------------------------------------------------- declare cnt integer; rec record; begin cnt := 0; -- allow only single event mover at a time, without affecting inserts lock table pgq.retry_queue in share update exclusive mode; for rec in select queue_name, ev_id, ev_time, ev_owner, ev_retry, ev_type, ev_data, ev_extra1, ev_extra2, ev_extra3, ev_extra4 from pgq.retry_queue, pgq.queue where ev_retry_after <= current_timestamp and queue_id = ev_queue order by ev_retry_after limit 10 loop cnt := cnt + 1; perform pgq.insert_event_raw(rec.queue_name, rec.ev_id, rec.ev_time, rec.ev_owner, rec.ev_retry, rec.ev_type, rec.ev_data, rec.ev_extra1, rec.ev_extra2, rec.ev_extra3, rec.ev_extra4); delete from pgq.retry_queue where ev_owner = rec.ev_owner and ev_id = rec.ev_id; end loop; return cnt; end; $$ language plpgsql; -- need admin access skytools-3.2.6/sql/pgq/functions/pgq.get_queue_info.sql0000644000000000000000000001341512426435645020221 0ustar drop function if exists pgq.get_queue_info(); drop function if exists pgq.get_queue_info(text); create or replace function pgq.get_queue_info( out queue_name text, out queue_ntables integer, out queue_cur_table integer, out queue_rotation_period interval, out queue_switch_time timestamptz, out queue_external_ticker boolean, out queue_ticker_paused boolean, out queue_ticker_max_count integer, out queue_ticker_max_lag interval, out queue_ticker_idle_period interval, out ticker_lag interval, out ev_per_sec float8, out ev_new bigint, out last_tick_id bigint) returns setof record as $$ -- ---------------------------------------------------------------------- -- Function: pgq.get_queue_info(0) -- -- Get info about all queues. -- -- Returns: -- List of pgq.ret_queue_info records. -- queue_name - queue name -- queue_ntables - number of tables in this queue -- queue_cur_table - ??? -- queue_rotation_period - how often the event_N_M tables in this queue are rotated -- queue_switch_time - ??? 
when was this queue last rotated -- queue_external_ticker - ??? -- queue_ticker_paused - ??? is ticker paused in this queue -- queue_ticker_max_count - max number of events before a tick is issued -- queue_ticker_max_lag - maks time without a tick -- queue_ticker_idle_period - how often the ticker should check this queue -- ticker_lag - time from last tick -- ev_per_sec - how many events per second this queue serves -- ev_new - ??? -- last_tick_id - last tick id for this queue -- -- ---------------------------------------------------------------------- begin for queue_name, queue_ntables, queue_cur_table, queue_rotation_period, queue_switch_time, queue_external_ticker, queue_ticker_paused, queue_ticker_max_count, queue_ticker_max_lag, queue_ticker_idle_period, ticker_lag, ev_per_sec, ev_new, last_tick_id in select f.queue_name, f.queue_ntables, f.queue_cur_table, f.queue_rotation_period, f.queue_switch_time, f.queue_external_ticker, f.queue_ticker_paused, f.queue_ticker_max_count, f.queue_ticker_max_lag, f.queue_ticker_idle_period, f.ticker_lag, f.ev_per_sec, f.ev_new, f.last_tick_id from pgq.get_queue_info(null) f loop return next; end loop; return; end; $$ language plpgsql; create or replace function pgq.get_queue_info( in i_queue_name text, out queue_name text, out queue_ntables integer, out queue_cur_table integer, out queue_rotation_period interval, out queue_switch_time timestamptz, out queue_external_ticker boolean, out queue_ticker_paused boolean, out queue_ticker_max_count integer, out queue_ticker_max_lag interval, out queue_ticker_idle_period interval, out ticker_lag interval, out ev_per_sec float8, out ev_new bigint, out last_tick_id bigint) returns setof record as $$ -- ---------------------------------------------------------------------- -- Function: pgq.get_queue_info(1) -- -- Get info about particular queue. -- -- Returns: -- One pgq.ret_queue_info record. 
-- contente same as forpgq.get_queue_info() -- ---------------------------------------------------------------------- declare _ticker_lag interval; _top_tick_id bigint; _ht_tick_id bigint; _top_tick_time timestamptz; _top_tick_event_seq bigint; _ht_tick_time timestamptz; _ht_tick_event_seq bigint; _queue_id integer; _queue_event_seq text; begin for queue_name, queue_ntables, queue_cur_table, queue_rotation_period, queue_switch_time, queue_external_ticker, queue_ticker_paused, queue_ticker_max_count, queue_ticker_max_lag, queue_ticker_idle_period, _queue_id, _queue_event_seq in select q.queue_name, q.queue_ntables, q.queue_cur_table, q.queue_rotation_period, q.queue_switch_time, q.queue_external_ticker, q.queue_ticker_paused, q.queue_ticker_max_count, q.queue_ticker_max_lag, q.queue_ticker_idle_period, q.queue_id, q.queue_event_seq from pgq.queue q where (i_queue_name is null or q.queue_name = i_queue_name) order by q.queue_name loop -- most recent tick select (current_timestamp - t.tick_time), tick_id, t.tick_time, t.tick_event_seq into ticker_lag, _top_tick_id, _top_tick_time, _top_tick_event_seq from pgq.tick t where t.tick_queue = _queue_id order by t.tick_queue desc, t.tick_id desc limit 1; -- slightly older tick select ht.tick_id, ht.tick_time, ht.tick_event_seq into _ht_tick_id, _ht_tick_time, _ht_tick_event_seq from pgq.tick ht where ht.tick_queue = _queue_id and ht.tick_id >= _top_tick_id - 20 order by ht.tick_queue asc, ht.tick_id asc limit 1; if _ht_tick_time < _top_tick_time then ev_per_sec = (_top_tick_event_seq - _ht_tick_event_seq) / extract(epoch from (_top_tick_time - _ht_tick_time)); else ev_per_sec = null; end if; ev_new = pgq.seq_getval(_queue_event_seq) - _top_tick_event_seq; last_tick_id = _top_tick_id; return next; end loop; return; end; $$ language plpgsql; skytools-3.2.6/sql/pgq/functions/pgq.batch_event_tables.sql0000644000000000000000000000415412426435645021037 0ustar create or replace function pgq.batch_event_tables(x_batch_id bigint) 
returns setof text as $$ -- ---------------------------------------------------------------------- -- Function: pgq.batch_event_tables(1) -- -- Returns set of table names where this batch events may reside. -- -- Parameters: -- x_batch_id - ID of a active batch. -- ---------------------------------------------------------------------- declare nr integer; tbl text; use_prev integer; use_next integer; batch record; begin select txid_snapshot_xmin(last.tick_snapshot) as tx_min, -- absolute minimum txid_snapshot_xmax(cur.tick_snapshot) as tx_max, -- absolute maximum q.queue_data_pfx, q.queue_ntables, q.queue_cur_table, q.queue_switch_step1, q.queue_switch_step2 into batch from pgq.tick last, pgq.tick cur, pgq.subscription s, pgq.queue q where cur.tick_id = s.sub_next_tick and cur.tick_queue = s.sub_queue and last.tick_id = s.sub_last_tick and last.tick_queue = s.sub_queue and s.sub_batch = x_batch_id and q.queue_id = s.sub_queue; if not found then raise exception 'Cannot find data for batch %', x_batch_id; end if; -- if its definitely not in one or other, look into both if batch.tx_max < batch.queue_switch_step1 then use_prev := 1; use_next := 0; elsif batch.queue_switch_step2 is not null and (batch.tx_min > batch.queue_switch_step2) then use_prev := 0; use_next := 1; else use_prev := 1; use_next := 1; end if; if use_prev then nr := batch.queue_cur_table - 1; if nr < 0 then nr := batch.queue_ntables - 1; end if; tbl := batch.queue_data_pfx || '_' || nr::text; return next tbl; end if; if use_next then tbl := batch.queue_data_pfx || '_' || batch.queue_cur_table::text; return next tbl; end if; return; end; $$ language plpgsql; -- no perms needed skytools-3.2.6/sql/pgq/functions/pgq.register_consumer.sql0000644000000000000000000000752512426435645020767 0ustar create or replace function pgq.register_consumer( x_queue_name text, x_consumer_id text) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: 
pgq.register_consumer(2) -- -- Subscribe consumer on a queue. -- -- From this moment forward, consumer will see all events in the queue. -- -- Parameters: -- x_queue_name - Name of queue -- x_consumer_name - Name of consumer -- -- Returns: -- 0 - if already registered -- 1 - if new registration -- Calls: -- pgq.register_consumer_at(3) -- Tables directly manipulated: -- None -- ---------------------------------------------------------------------- begin return pgq.register_consumer_at(x_queue_name, x_consumer_id, NULL); end; $$ language plpgsql security definer; create or replace function pgq.register_consumer_at( x_queue_name text, x_consumer_name text, x_tick_pos bigint) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.register_consumer_at(3) -- -- Extended registration, allows to specify tick_id. -- -- Note: -- For usage in special situations. -- -- Parameters: -- x_queue_name - Name of a queue -- x_consumer_name - Name of consumer -- x_tick_pos - Tick ID -- -- Returns: -- 0/1 whether consumer has already registered. 
-- Calls: -- None -- Tables directly manipulated: -- update/insert - pgq.subscription -- ---------------------------------------------------------------------- declare tmp text; last_tick bigint; x_queue_id integer; x_consumer_id integer; queue integer; sub record; begin select queue_id into x_queue_id from pgq.queue where queue_name = x_queue_name; if not found then raise exception 'Event queue not created yet'; end if; -- get consumer and create if new select co_id into x_consumer_id from pgq.consumer where co_name = x_consumer_name for update; if not found then insert into pgq.consumer (co_name) values (x_consumer_name); x_consumer_id := currval('pgq.consumer_co_id_seq'); end if; -- if particular tick was requested, check if it exists if x_tick_pos is not null then perform 1 from pgq.tick where tick_queue = x_queue_id and tick_id = x_tick_pos; if not found then raise exception 'cannot reposition, tick not found: %', x_tick_pos; end if; end if; -- check if already registered select sub_last_tick, sub_batch into sub from pgq.subscription where sub_consumer = x_consumer_id and sub_queue = x_queue_id; if found then if x_tick_pos is not null then -- if requested, update tick pos and drop partial batch update pgq.subscription set sub_last_tick = x_tick_pos, sub_batch = null, sub_next_tick = null, sub_active = now() where sub_consumer = x_consumer_id and sub_queue = x_queue_id; end if; -- already registered return 0; end if; -- new registration if x_tick_pos is null then -- start from current tick select tick_id into last_tick from pgq.tick where tick_queue = x_queue_id order by tick_queue desc, tick_id desc limit 1; if not found then raise exception 'No ticks for this queue. 
Please run ticker on database.'; end if; else last_tick := x_tick_pos; end if; -- register insert into pgq.subscription (sub_queue, sub_consumer, sub_last_tick) values (x_queue_id, x_consumer_id, last_tick); return 1; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq/functions/pgq.get_batch_info.sql0000644000000000000000000000364312426435645020160 0ustar create or replace function pgq.get_batch_info( in x_batch_id bigint, out queue_name text, out consumer_name text, out batch_start timestamptz, out batch_end timestamptz, out prev_tick_id bigint, out tick_id bigint, out lag interval, out seq_start bigint, out seq_end bigint) as $$ -- ---------------------------------------------------------------------- -- Function: pgq.get_batch_info(1) -- -- Returns detailed info about a batch. -- -- Parameters: -- x_batch_id - id of a active batch. -- -- Returns: ??? pls check -- queue_name - which queue this batch came from -- consumer_name - batch processed by -- batch_start - start time of batch -- batch_end - end time of batch -- prev_tick_id - start tick for this batch -- tick_id - end tick for this batch -- lag - now() - tick_id.time -- seq_start - start event id for batch -- seq_end - end event id for batch -- ---------------------------------------------------------------------- begin select q.queue_name, c.co_name, prev.tick_time, cur.tick_time, s.sub_last_tick, s.sub_next_tick, current_timestamp - cur.tick_time, prev.tick_event_seq, cur.tick_event_seq into queue_name, consumer_name, batch_start, batch_end, prev_tick_id, tick_id, lag, seq_start, seq_end from pgq.subscription s, pgq.tick cur, pgq.tick prev, pgq.queue q, pgq.consumer c where s.sub_batch = x_batch_id and prev.tick_id = s.sub_last_tick and prev.tick_queue = s.sub_queue and cur.tick_id = s.sub_next_tick and cur.tick_queue = s.sub_queue and q.queue_id = s.sub_queue and c.co_id = s.sub_consumer; return; end; $$ language plpgsql security definer; 
skytools-3.2.6/sql/pgq/functions/pgq.upgrade_schema.sql0000644000000000000000000000213612426435645020170 0ustar create or replace function pgq.upgrade_schema() returns int4 as $$ -- updates table structure if necessary declare cnt int4 = 0; begin -- pgq.subscription.sub_last_tick: NOT NULL -> NULL perform 1 from information_schema.columns where table_schema = 'pgq' and table_name = 'subscription' and column_name ='sub_last_tick' and is_nullable = 'NO'; if found then alter table pgq.subscription alter column sub_last_tick drop not null; cnt := cnt + 1; end if; -- create roles perform 1 from pg_catalog.pg_roles where rolname = 'pgq_reader'; if not found then create role pgq_reader; cnt := cnt + 1; end if; perform 1 from pg_catalog.pg_roles where rolname = 'pgq_writer'; if not found then create role pgq_writer; cnt := cnt + 1; end if; perform 1 from pg_catalog.pg_roles where rolname = 'pgq_admin'; if not found then create role pgq_admin in role pgq_reader, pgq_writer; cnt := cnt + 1; end if; return cnt; end; $$ language plpgsql; skytools-3.2.6/sql/pgq/functions/pgq.batch_retry.sql0000644000000000000000000000332712426435645017532 0ustar create or replace function pgq.batch_retry( i_batch_id bigint, i_retry_seconds integer) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq.batch_retry(2) -- -- Put whole batch into retry queue, to be processed again later. -- -- Parameters: -- i_batch_id - ID of active batch. 
-- i_retry_time - Time when the event should be put back into queue -- -- Returns: -- number of events inserted -- Calls: -- None -- Tables directly manipulated: -- pgq.retry_queue -- ---------------------------------------------------------------------- declare _retry timestamptz; _cnt integer; _s record; begin _retry := current_timestamp + ((i_retry_seconds::text || ' seconds')::interval); select * into _s from pgq.subscription where sub_batch = i_batch_id; if not found then raise exception 'batch_retry: batch % not found', i_batch_id; end if; insert into pgq.retry_queue (ev_retry_after, ev_queue, ev_id, ev_time, ev_txid, ev_owner, ev_retry, ev_type, ev_data, ev_extra1, ev_extra2, ev_extra3, ev_extra4) select distinct _retry, _s.sub_queue, b.ev_id, b.ev_time, NULL::int8, _s.sub_id, coalesce(b.ev_retry, 0) + 1, b.ev_type, b.ev_data, b.ev_extra1, b.ev_extra2, b.ev_extra3, b.ev_extra4 from pgq.get_batch_events(i_batch_id) b left join pgq.retry_queue rq on (rq.ev_id = b.ev_id and rq.ev_owner = _s.sub_id and rq.ev_queue = _s.sub_queue) where rq.ev_id is null; GET DIAGNOSTICS _cnt = ROW_COUNT; return _cnt; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq/sql/0000755000000000000000000000000012426435645012477 5ustar skytools-3.2.6/sql/pgq/sql/logutriga.sql0000644000000000000000000000754512426435645015230 0ustar \set VERBOSITY 'terse' set client_min_messages = 'warning'; select 1 from (select set_config(name, 'escape', false) as ignore from pg_settings where name = 'bytea_output') x where x.ignore = 'foo'; create or replace function pgq.insert_event(queue_name text, ev_type text, ev_data text, ev_extra1 text, ev_extra2 text, ev_extra3 text, ev_extra4 text) returns bigint as $$ begin raise warning 'insert_event(%, %, %, %)', queue_name, ev_type, ev_data, ev_extra1; return 1; end; $$ language plpgsql; create table udata ( id serial primary key, txt text, bin bytea ); create trigger utest AFTER insert or update or delete ON udata for each row execute 
procedure pgq.logutriga('udata_que'); insert into udata (txt) values ('text1'); insert into udata (bin) values (E'bi\tn\\000bin'); -- test ignore drop trigger utest on udata; truncate udata; create trigger utest after insert or update or delete on udata for each row execute procedure pgq.logutriga('udata_que', 'ignore=bin'); insert into udata values (1, 'txt', 'bin'); update udata set txt = 'txt'; update udata set txt = 'txt2', bin = 'bin2'; update udata set bin = 'bin3'; delete from udata; -- test missing pkey create table nopkey2 (dat text); create trigger nopkey_triga2 after insert or update or delete on nopkey2 for each row execute procedure pgq.logutriga('que3'); insert into nopkey2 values ('foo'); update nopkey2 set dat = 'bat'; delete from nopkey2; -- test custom pkey create table ucustom_pkey (dat1 text not null, dat2 int2 not null, dat3 text); create trigger ucustom_triga after insert or update or delete on ucustom_pkey --for each row execute procedure pgq.logutriga('que3', 'pkey=dat1,dat2'); for each row execute procedure pgq.logutriga('que3'); insert into ucustom_pkey values ('foo', '2'); update ucustom_pkey set dat3 = 'bat'; delete from ucustom_pkey; -- test custom fields create table custom_fields2 ( dat1 text not null primary key, dat2 int2 not null, dat3 text, _pgq_ev_type text default 'my_type', _pgq_ev_extra1 text default 'e1', _pgq_ev_extra2 text default 'e2', _pgq_ev_extra3 text default 'e3', _pgq_ev_extra4 text default 'e4' ); create trigger customf2_triga after insert or update or delete on custom_fields2 for each row execute procedure pgq.logutriga('que3'); insert into custom_fields2 values ('foo', '2'); update custom_fields2 set dat3 = 'bat'; delete from custom_fields2; -- test custom expression create table custom_expr2 ( dat1 text not null primary key, dat2 int2 not null, dat3 text ); create trigger customex2_triga after insert or update or delete on custom_expr2 for each row execute procedure pgq.logutriga('que3', 'ev_extra1=''test='' || 
dat1', 'ev_type=dat3'); insert into custom_expr2 values ('foo', '2'); update custom_expr2 set dat3 = 'bat'; delete from custom_expr2; -- test when= create table when_test ( dat1 text not null primary key, dat2 int2 not null, dat3 text ); create trigger when_triga after insert or update or delete on when_test for each row execute procedure pgq.logutriga('que3', 'when=dat1=''foo'''); insert into when_test values ('foo', '2'); insert into when_test values ('bar', '2'); select * from when_test; update when_test set dat3 = 'bat'; delete from when_test; drop trigger when_triga on when_test; create trigger when_triga after insert or update or delete on when_test for each row execute procedure pgq.logutriga('que3', 'when=current_user=''random'''); insert into when_test values ('foo', '2'); select * from when_test; -- test deny create table deny_test ( dat1 text not null primary key, dat2 text ); create trigger deny_triga after insert or update or delete on deny_test for each row execute procedure pgq.logutriga('noqueue', 'deny'); insert into deny_test values ('1', '2'); -- test pk update insert into udata (id, txt) values (1, 'txt'); update udata set id = 2; skytools-3.2.6/sql/pgq/sql/clean.sql0000644000000000000000000000070212426435645014301 0ustar \set VERBOSITY 'terse' set client_min_messages = 'warning'; drop schema pgq cascade; drop sequence tmptest_seq; drop table custom_expr; drop table custom_expr2; drop table custom_fields; drop table custom_fields2; drop table custom_pkey; drop table deny_test; drop table nopkey; drop table nopkey2; drop table rtest; drop table if exists trunctrg1; drop table if exists trunctrg2; drop table ucustom_pkey; drop table udata; drop table when_test; skytools-3.2.6/sql/pgq/sql/trunctrg.sql0000644000000000000000000000135612426435645015075 0ustar \set VERBOSITY 'terse' set client_min_messages = 'warning'; -- test sqltriga truncate create table trunctrg1 ( dat1 text not null primary key, dat2 int2 not null, dat3 text ); create trigger 
trunc1_trig after truncate on trunctrg1 for each statement execute procedure pgq.sqltriga('que3'); truncate trunctrg1; -- test logutriga truncate create table trunctrg2 ( dat1 text not null primary key, dat2 int2 not null, dat3 text ); create trigger trunc2_trig after truncate on trunctrg2 for each statement execute procedure pgq.logutriga('que3'); truncate trunctrg2; -- test deny create trigger deny_triga2 after truncate on trunctrg2 for each statement execute procedure pgq.logutriga('noqueue', 'deny'); truncate trunctrg2; skytools-3.2.6/sql/pgq/sql/pgq_init_upgrade.sql0000644000000000000000000000031212426435645016535 0ustar \set ECHO none \set VERBOSITY 'terse' set client_min_messages = 'warning'; \i ../../upgrade/final/pgq_core_2.1.13.sql \i ../../upgrade/final/pgq.upgrade_2.1_to_3.0.sql \i pgq.upgrade.sql \set ECHO all skytools-3.2.6/sql/pgq/sql/pgq_core.sql0000644000000000000000000001146012426435645015021 0ustar \set VERBOSITY 'terse' set client_min_messages = 'warning'; select * from pgq.maint_tables_to_vacuum(); select * from pgq.maint_retry_events(); select pgq.create_queue('tmpqueue'); select pgq.register_consumer('tmpqueue', 'consumer'); select pgq.unregister_consumer('tmpqueue', 'consumer'); select pgq.drop_queue('tmpqueue'); select pgq.create_queue('myqueue'); select pgq.register_consumer('myqueue', 'consumer'); update pgq.queue set queue_ticker_max_lag = '0', queue_ticker_idle_period = '0'; select pgq.next_batch('myqueue', 'consumer'); select pgq.next_batch('myqueue', 'consumer'); select pgq.ticker(); select pgq.next_batch('myqueue', 'consumer'); select pgq.next_batch('myqueue', 'consumer'); select queue_name, consumer_name, prev_tick_id, tick_id, lag < '30 seconds' as lag_exists from pgq.get_batch_info(1); select queue_name, queue_ntables, queue_cur_table, queue_rotation_period, queue_switch_time <= now() as switch_time_exists, queue_external_ticker, queue_ticker_max_count, queue_ticker_max_lag, queue_ticker_idle_period, ticker_lag < '2 hours' as 
ticker_lag_exists, last_tick_id from pgq.get_queue_info() order by 1; select queue_name, consumer_name, lag < '30 seconds' as lag_exists, last_seen < '30 seconds' as last_seen_exists, last_tick, current_batch, next_tick from pgq.get_consumer_info() order by 1, 2; select pgq.finish_batch(1); select pgq.finish_batch(1); select pgq.ticker(); select pgq.next_batch('myqueue', 'consumer'); select * from pgq.batch_event_tables(2); select * from pgq.get_batch_events(2); select pgq.finish_batch(2); select pgq.insert_event('myqueue', 'r1', 'data'); select pgq.insert_event('myqueue', 'r2', 'data', 'extra1', 'extra2', 'extra3', 'extra4'); select pgq.insert_event('myqueue', 'r3', 'data'); select pgq.current_event_table('myqueue'); select pgq.ticker(); select * from pgq.next_batch_custom('myqueue', 'consumer', '1 hour', null, null); select * from pgq.next_batch_custom('myqueue', 'consumer', null, 10000, null); select * from pgq.next_batch_custom('myqueue', 'consumer', null, null, '10 minutes'); select pgq.next_batch('myqueue', 'consumer'); select ev_id,ev_retry,ev_type,ev_data,ev_extra1,ev_extra2,ev_extra3,ev_extra4 from pgq.get_batch_events(3); begin; select ev_id,ev_retry,ev_type,ev_data,ev_extra1,ev_extra2,ev_extra3,ev_extra4 from pgq.get_batch_cursor(3, 'acurs', 10); close acurs; select ev_id,ev_retry,ev_type,ev_data,ev_extra1,ev_extra2,ev_extra3,ev_extra4 from pgq.get_batch_cursor(3, 'acurs', 2); close acurs; select ev_id,ev_retry,ev_type,ev_data,ev_extra1,ev_extra2,ev_extra3,ev_extra4 from pgq.get_batch_cursor(3, 'acurs', 2, 'ev_id = 1'); close acurs; end; select pgq.event_retry(3, 2, 0); select pgq.batch_retry(3, 0); select pgq.finish_batch(3); select pgq.event_retry_raw('myqueue', 'consumer', now(), 666, now(), 0, 'rawtest', 'data', null, null, null, null); select pgq.ticker(); -- test maint update pgq.queue set queue_rotation_period = '0 seconds'; select queue_name, pgq.maint_rotate_tables_step1(queue_name) from pgq.queue; select pgq.maint_rotate_tables_step2(); -- test 
extra select nextval(queue_event_seq) from pgq.queue where queue_name = 'myqueue'; select pgq.force_tick('myqueue'); select nextval(queue_event_seq) from pgq.queue where queue_name = 'myqueue'; create sequence tmptest_seq; select pgq.seq_getval('tmptest_seq'); select pgq.seq_setval('tmptest_seq', 10); select pgq.seq_setval('tmptest_seq', 5); select pgq.seq_setval('tmptest_seq', 15); select pgq.seq_getval('tmptest_seq'); -- test disabled select pgq.insert_event('myqueue', 'test', 'event'); update pgq.queue set queue_disable_insert = true where queue_name = 'myqueue'; select pgq.insert_event('myqueue', 'test', 'event'); update pgq.queue set queue_disable_insert = false where queue_name = 'myqueue'; select pgq.insert_event('myqueue', 'test', 'event'); -- test limit update pgq.queue set queue_per_tx_limit = 2 where queue_name = 'myqueue'; begin; select pgq.insert_event('myqueue', 'test', 'event1'); select pgq.insert_event('myqueue', 'test', 'event2'); select pgq.insert_event('myqueue', 'test', 'event3'); end; update pgq.queue set queue_per_tx_limit = 0 where queue_name = 'myqueue'; begin; select pgq.insert_event('myqueue', 'test', 'event1'); select pgq.insert_event('myqueue', 'test', 'event2'); select pgq.insert_event('myqueue', 'test', 'event3'); end; update pgq.queue set queue_per_tx_limit = null where queue_name = 'myqueue'; begin; select pgq.insert_event('myqueue', 'test', 'event1'); select pgq.insert_event('myqueue', 'test', 'event2'); select pgq.insert_event('myqueue', 'test', 'event3'); end; select * from pgq.maint_operations(); alter table pgq.queue add column queue_extra_maint text[]; select * from pgq.maint_operations(); update pgq.queue set queue_extra_maint = array['baz', 'foo.bar']; select * from pgq.maint_operations(); skytools-3.2.6/sql/pgq/sql/pgq_init_noext.sql0000644000000000000000000000023212426435645016244 0ustar \set ECHO none \set VERBOSITY 'terse' set client_min_messages = 'warning'; -- \i ../txid/txid.sql -- \i pgq.sql \i structure/install.sql 
\set ECHO all skytools-3.2.6/sql/pgq/sql/pgq_perms.sql0000644000000000000000000000206712426435645015222 0ustar \set ECHO off \set VERBOSITY 'terse' set client_min_messages = 'warning'; -- drop public perms \i structure/newgrants_pgq.sql -- select proname, proacl from pg_proc p, pg_namespace n where n.nspname = 'pgq' and p.pronamespace = n.oid; \set ECHO all drop role if exists pgq_test_producer; drop role if exists pgq_test_consumer; drop role if exists pgq_test_admin; create role pgq_test_consumer with login in role pgq_reader; create role pgq_test_producer with login in role pgq_writer; create role pgq_test_admin with login in role pgq_admin; \c - pgq_test_admin select * from pgq.create_queue('pqueue'); -- ok \c - pgq_test_producer select * from pgq.create_queue('pqueue'); -- fail select * from pgq.insert_event('pqueue', 'test', 'data'); -- ok select * from pgq.register_consumer('pqueue', 'prod'); -- fail \c - pgq_test_consumer select * from pgq.create_queue('pqueue'); -- fail select * from pgq.insert_event('pqueue', 'test', 'data'); -- fail select * from pgq.register_consumer('pqueue', 'cons'); -- ok select * from pgq.next_batch('pqueue', 'cons'); -- ok skytools-3.2.6/sql/pgq/sql/sqltriga.sql0000644000000000000000000000646312426435645015057 0ustar \set VERBOSITY 'terse' set client_min_messages = 'warning'; -- start testing create table rtest ( id integer primary key, dat text ); create trigger rtest_triga after insert or update or delete on rtest for each row execute procedure pgq.sqltriga('que'); -- simple test insert into rtest values (1, 'value1'); update rtest set dat = 'value2'; delete from rtest; -- test new fields alter table rtest add column dat2 text; insert into rtest values (1, 'value1'); update rtest set dat = 'value2'; delete from rtest; -- test field ignore drop trigger rtest_triga on rtest; create trigger rtest_triga after insert or update or delete on rtest for each row execute procedure pgq.sqltriga('que2', 'ignore=dat2'); insert into rtest 
values (1, '666', 'newdat'); update rtest set dat = 5, dat2 = 'newdat2'; update rtest set dat = 6; delete from rtest; -- test hashed pkey -- drop trigger rtest_triga on rtest; -- create trigger rtest_triga after insert or update or delete on rtest -- for each row execute procedure pgq.sqltriga('que2', 'ignore=dat2','pkey=dat,hashtext(dat)'); -- insert into rtest values (1, '666', 'newdat'); -- update rtest set dat = 5, dat2 = 'newdat2'; -- update rtest set dat = 6; -- delete from rtest; -- test wrong key drop trigger rtest_triga on rtest; create trigger rtest_triga after insert or update or delete on rtest for each row execute procedure pgq.sqltriga('que3'); insert into rtest values (1, 0, 'non-null'); insert into rtest values (2, 0, NULL); update rtest set dat2 = 'non-null2' where id=1; update rtest set dat2 = NULL where id=1; update rtest set dat2 = 'new-nonnull' where id=2; delete from rtest where id=1; delete from rtest where id=2; -- test missing pkey create table nopkey (dat text); create trigger nopkey_triga after insert or update or delete on nopkey for each row execute procedure pgq.sqltriga('que3'); insert into nopkey values ('foo'); update nopkey set dat = 'bat'; delete from nopkey; -- test custom pkey create table custom_pkey (dat1 text not null, dat2 int2 not null, dat3 text); create trigger custom_triga after insert or update or delete on custom_pkey for each row execute procedure pgq.sqltriga('que3', 'pkey=dat1,dat2'); insert into custom_pkey values ('foo', '2'); update custom_pkey set dat3 = 'bat'; delete from custom_pkey; -- test custom fields create table custom_fields ( dat1 text not null primary key, dat2 int2 not null, dat3 text, _pgq_ev_type text default 'my_type', _pgq_ev_extra1 text default 'e1', _pgq_ev_extra2 text default 'e2', _pgq_ev_extra3 text default 'e3', _pgq_ev_extra4 text default 'e4' ); create trigger customf_triga after insert or update or delete on custom_fields for each row execute procedure pgq.sqltriga('que3'); insert into 
custom_fields values ('foo', '2'); update custom_fields set dat3 = 'bat'; delete from custom_fields; -- test custom expression create table custom_expr ( dat1 text not null primary key, dat2 int2 not null, dat3 text ); create trigger customex_triga after insert or update or delete on custom_expr for each row execute procedure pgq.sqltriga('que3', 'ev_extra1=''test='' || dat1', 'ev_type=dat3'); insert into custom_expr values ('foo', '2'); update custom_expr set dat3 = 'bat'; delete from custom_expr; -- test pk update insert into rtest values (1, 'value1'); update rtest set id = 2; skytools-3.2.6/sql/pgq/sql/pgq_init_ext.sql0000644000000000000000000000121612426435645015712 0ustar -- create noext schema \set ECHO none \set VERBOSITY 'terse' set client_min_messages = 'warning'; \i structure/install.sql select pgq.create_queue('testqueue1'); \set ECHO all -- convert to extension create extension pgq from 'unpackaged'; select array_length(extconfig, 1) from pg_catalog.pg_extension where extname = 'pgq'; select pgq.create_queue('testqueue2'); --drop extension pgq; -- will fail select pgq.drop_queue('testqueue2'); select pgq.drop_queue('testqueue1'); -- drop schema failure drop extension pgq; -- create clean schema create extension pgq; select array_length(extconfig, 1) from pg_catalog.pg_extension where extname = 'pgq'; skytools-3.2.6/sql/pgq/pgq.control0000644000000000000000000000021712426435645014071 0ustar # pgq extension comment = 'Generic queue for PostgreSQL' default_version = '3.2.6' relocatable = false superuser = true schema = 'pg_catalog' skytools-3.2.6/sql/pgq/lowlevel/0000755000000000000000000000000012426435645013531 5ustar skytools-3.2.6/sql/pgq/lowlevel/Makefile0000644000000000000000000000025312426435645015171 0ustar MODULE_big = pgq_lowlevel DATA = pgq_lowlevel.sql SRCS = insert_event.c OBJS = $(SRCS:.c=.o) PG_CONFIG = pg_config PGXS = $(shell $(PG_CONFIG) --pgxs) include $(PGXS) 
skytools-3.2.6/sql/pgq/lowlevel/insert_event.c0000644000000000000000000002121512426435645016403 0ustar /* * insert_event.c - C implementation of pgq.insert_event_raw(). * * Copyright (c) 2007 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "postgres.h" #include "funcapi.h" #include "catalog/pg_type.h" #include "commands/trigger.h" #include "executor/spi.h" #include "lib/stringinfo.h" #include "utils/builtins.h" #include "utils/datetime.h" #include "utils/hsearch.h" #include "access/xact.h" /* * Module tag */ #ifdef PG_MODULE_MAGIC PG_MODULE_MAGIC; #endif #ifndef TextDatumGetCString #define TextDatumGetCString(d) DatumGetCString(DirectFunctionCall1(textout, d)) #endif /* * Function tag */ Datum pgq_insert_event_raw(PG_FUNCTION_ARGS); PG_FUNCTION_INFO_V1(pgq_insert_event_raw); /* * Queue info fetching. * * Always touch ev_id sequence, even if ev_id is given as arg, * to notify ticker about new event. 
*/ #define QUEUE_SQL \ "select queue_id::int4, queue_data_pfx::text," \ " queue_cur_table::int4, nextval(queue_event_seq)::int8," \ " queue_disable_insert::bool," \ " queue_per_tx_limit::int4" \ " from pgq.queue where queue_name = $1" #define COL_QUEUE_ID 1 #define COL_PREFIX 2 #define COL_TBLNO 3 #define COL_EVENT_ID 4 #define COL_DISABLED 5 #define COL_LIMIT 6 /* * Support inserting into pgq 2 queues. */ #define QUEUE_SQL_OLD \ "select queue_id::int4, queue_data_pfx::text," \ " queue_cur_table::int4, nextval(queue_event_seq)::int8," \ " false::bool as queue_disable_insert," \ " null::int4 as queue_per_tx_limit" \ " from pgq.queue where queue_name = $1" #define QUEUE_CHECK_NEW \ "select 1 from pg_catalog.pg_attribute" \ " where attname = 'queue_per_tx_limit'" \ " and attrelid = 'pgq.queue'::regclass" /* * Plan cache entry in HTAB. */ struct InsertCacheEntry { Oid queue_id; /* actually int32, but we want to use oid_hash */ int cur_table; TransactionId last_xid; int last_count; void *plan; }; /* * helper structure to pass values. */ struct QueueState { int queue_id; int cur_table; char *table_prefix; Datum next_event_id; bool disabled; int per_tx_limit; }; /* * Cached plans. */ static void *queue_plan; static HTAB *insert_cache; /* * Prepare utility plans and plan cache. */ static void init_cache(void) { static int init_done = 0; Oid types[1] = { TEXTOID }; HASHCTL ctl; int flags; int res; int max_queues = 128; const char *sql; if (init_done) return; /* * Check if old (v2.x) or new (v3.x) queue table. * * Needed for upgrades. */ res = SPI_execute(QUEUE_CHECK_NEW, 1, 0); if (res < 0) elog(ERROR, "pgq.insert_event: QUEUE_CHECK_NEW failed"); if (SPI_processed > 0) { sql = QUEUE_SQL; } else { sql = QUEUE_SQL_OLD; } /* * Init plans. */ queue_plan = SPI_saveplan(SPI_prepare(sql, 1, types)); if (queue_plan == NULL) elog(ERROR, "pgq_insert: SPI_prepare() failed"); /* * init insert plan cache. 
*/ MemSet(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(Oid); ctl.entrysize = sizeof(struct InsertCacheEntry); ctl.hash = oid_hash; flags = HASH_ELEM | HASH_FUNCTION; insert_cache = hash_create("pgq_insert_raw plans cache", max_queues, &ctl, flags); init_done = 1; } /* * Create new plan for insertion into current queue table. */ static void *make_plan(struct QueueState *state) { void *plan; StringInfo sql; static Oid types[10] = { INT8OID, TIMESTAMPTZOID, INT4OID, INT4OID, TEXTOID, TEXTOID, TEXTOID, TEXTOID, TEXTOID, TEXTOID }; /* * create sql */ sql = makeStringInfo(); appendStringInfo(sql, "insert into %s_%d (ev_id, ev_time, ev_owner, ev_retry," " ev_type, ev_data, ev_extra1, ev_extra2, ev_extra3, ev_extra4)" " values ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)", state->table_prefix, state->cur_table); /* * create plan */ plan = SPI_prepare(sql->data, 10, types); return SPI_saveplan(plan); } /* * fetch insert plan from cache. */ static void *load_insert_plan(Datum qname, struct QueueState *state) { struct InsertCacheEntry *entry; Oid queue_id = state->queue_id; bool did_exist = false; entry = hash_search(insert_cache, &queue_id, HASH_ENTER, &did_exist); if (did_exist) { if (entry->plan && state->cur_table == entry->cur_table) goto valid_table; if (entry->plan) SPI_freeplan(entry->plan); } entry->cur_table = state->cur_table; entry->last_xid = 0; entry->plan = NULL; /* this can fail, struct must be valid before */ entry->plan = make_plan(state); valid_table: if (state->per_tx_limit >= 0) { TransactionId xid = GetTopTransactionId(); if (entry->last_xid != xid) { entry->last_xid = xid; entry->last_count = 0; } entry->last_count++; if (entry->last_count > state->per_tx_limit) elog(ERROR, "Queue '%s' allows max %d events from one TX", TextDatumGetCString(qname), state->per_tx_limit); } return entry->plan; } /* * load queue info from pgq.queue table. 
*/ static void load_queue_info(Datum queue_name, struct QueueState *state) { Datum values[1]; int res; TupleDesc desc; HeapTuple row; bool isnull; values[0] = queue_name; res = SPI_execute_plan(queue_plan, values, NULL, false, 0); if (res != SPI_OK_SELECT) elog(ERROR, "Queue fetch failed"); if (SPI_processed == 0) elog(ERROR, "No such queue"); row = SPI_tuptable->vals[0]; desc = SPI_tuptable->tupdesc; state->queue_id = DatumGetInt32(SPI_getbinval(row, desc, COL_QUEUE_ID, &isnull)); if (isnull) elog(ERROR, "queue id NULL"); state->cur_table = DatumGetInt32(SPI_getbinval(row, desc, COL_TBLNO, &isnull)); if (isnull) elog(ERROR, "table nr NULL"); state->table_prefix = SPI_getvalue(row, desc, COL_PREFIX); if (state->table_prefix == NULL) elog(ERROR, "table prefix NULL"); state->next_event_id = SPI_getbinval(row, desc, COL_EVENT_ID, &isnull); if (isnull) elog(ERROR, "Seq name NULL"); state->disabled = SPI_getbinval(row, desc, COL_DISABLED, &isnull); if (isnull) elog(ERROR, "insert_disabled NULL"); state->per_tx_limit = SPI_getbinval(row, desc, COL_LIMIT, &isnull); if (isnull) state->per_tx_limit = -1; } /* * Arguments: * 0: queue_name text NOT NULL * 1: ev_id int8 if NULL take from SEQ * 2: ev_time timestamptz if NULL use now() * 3: ev_owner int4 * 4: ev_retry int4 * 5: ev_type text * 6: ev_data text * 7: ev_extra1 text * 8: ev_extra2 text * 9: ev_extra3 text * 10:ev_extra4 text */ Datum pgq_insert_event_raw(PG_FUNCTION_ARGS) { Datum values[11]; char nulls[11]; struct QueueState state; int64 ret_id; void *ins_plan; Datum ev_id, ev_time; int i, res; Datum qname; if (PG_NARGS() < 6) elog(ERROR, "Need at least 6 arguments"); if (PG_ARGISNULL(0)) elog(ERROR, "Queue name must not be NULL"); qname = PG_GETARG_DATUM(0); if (SPI_connect() < 0) elog(ERROR, "SPI_connect() failed"); init_cache(); load_queue_info(qname, &state); /* * Check if queue has disable_insert flag set. 
*/ #if defined(PG_VERSION_NUM) && PG_VERSION_NUM >= 80300 /* 8.3+: allow insert_event() even if connection is in 'replica' role */ if (state.disabled) { if (SessionReplicationRole != SESSION_REPLICATION_ROLE_REPLICA) elog(ERROR, "Insert into queue disallowed"); } #else /* pre-8.3 */ if (state.disabled) elog(ERROR, "Insert into queue disallowed"); #endif if (PG_ARGISNULL(1)) ev_id = state.next_event_id; else ev_id = PG_GETARG_DATUM(1); if (PG_ARGISNULL(2)) ev_time = DirectFunctionCall1(now, 0); else ev_time = PG_GETARG_DATUM(2); /* * Prepare arguments for INSERT */ values[0] = ev_id; nulls[0] = ' '; values[1] = ev_time; nulls[1] = ' '; for (i = 3; i < 11; i++) { int dst = i - 1; if (i >= PG_NARGS() || PG_ARGISNULL(i)) { values[dst] = (Datum)NULL; nulls[dst] = 'n'; } else { values[dst] = PG_GETARG_DATUM(i); nulls[dst] = ' '; } } /* * Perform INSERT into queue table. */ ins_plan = load_insert_plan(qname, &state); res = SPI_execute_plan(ins_plan, values, nulls, false, 0); if (res != SPI_OK_INSERT) elog(ERROR, "Queue insert failed"); /* * ev_id cannot pass SPI_finish() */ ret_id = DatumGetInt64(ev_id); if (SPI_finish() < 0) elog(ERROR, "SPI_finish failed"); PG_RETURN_INT64(ret_id); } skytools-3.2.6/sql/pgq/lowlevel/pgq_lowlevel.sql0000644000000000000000000000222212426435645016750 0ustar -- ---------------------------------------------------------------------- -- Function: pgq.insert_event_raw(11) -- -- Actual event insertion. Used also by retry queue maintenance. -- -- Parameters: -- queue_name - Name of the queue -- ev_id - Event ID. If NULL, will be taken from seq. -- ev_time - Event creation time. -- ev_owner - Subscription ID when retry event. If NULL, the event is for everybody. -- ev_retry - Retry count. NULL for first-time events. -- ev_type - user data -- ev_data - user data -- ev_extra1 - user data -- ev_extra2 - user data -- ev_extra3 - user data -- ev_extra4 - user data -- -- Returns: -- Event ID. 
-- ---------------------------------------------------------------------- CREATE OR REPLACE FUNCTION pgq.insert_event_raw( queue_name text, ev_id bigint, ev_time timestamptz, ev_owner integer, ev_retry integer, ev_type text, ev_data text, ev_extra1 text, ev_extra2 text, ev_extra3 text, ev_extra4 text) RETURNS int8 AS '$libdir/pgq_lowlevel', 'pgq_insert_event_raw' LANGUAGE C; skytools-3.2.6/sql/pgq/old/0000755000000000000000000000000012426435645012456 5ustar skytools-3.2.6/sql/pgq/old/pgq.sqltriga.sql0000644000000000000000000001420712426435645015617 0ustar -- listen trigger: -- create trigger triga_nimi after insert or update on customer -- for each row execute procedure pgq.sqltriga('qname'); -- redirect trigger: -- create trigger triga_nimi after insert or update on customer -- for each row execute procedure pgq.sqltriga('qname', 'ret=SKIP'); create or replace function pgq.sqltriga() returns trigger as $$ # -- ---------------------------------------------------------------------- # -- Function: pgq.sqltriga() # -- # -- Trigger function that puts row data in partial SQL form into queue. # -- # -- Parameters: # -- arg1 - queue name # -- arg2 - optional urlencoded options # -- # -- Extra options: # -- # -- ret - return value for function OK/SKIP # -- pkey - override pkey fields, can be functions # -- ignore - comma separated field names to ignore # -- # -- Queue event fields: # -- ev_type - I/U/D # -- ev_data - partial SQL statement # -- ev_extra1 - table name # -- # -- ---------------------------------------------------------------------- # this triger takes 1 or 2 args: # queue_name - destination queue # args - urlencoded dict of options: # ret - return value: OK/SKIP # pkey - comma-separated col names or funcs on cols # simple: pkey=user,orderno # hashed: pkey=user,hashtext(user) # ignore - comma-separated col names to ignore # on first call init stuff if not 'init_done' in SD: # find table name plan q = "SELECT n.nspname || '.' 
|| c.relname AS table_name"\ " FROM pg_namespace n, pg_class c"\ " WHERE n.oid = c.relnamespace AND c.oid = $1" SD['name_plan'] = plpy.prepare(q, ['oid']) # find key columns plan q = "SELECT k.attname FROM pg_index i, pg_attribute k"\ " WHERE i.indrelid = $1 AND k.attrelid = i.indexrelid"\ " AND i.indisprimary AND k.attnum > 0 AND NOT k.attisdropped"\ " ORDER BY k.attnum" SD['key_plan'] = plpy.prepare(q, ['oid']) # data insertion q = "SELECT pgq.insert_event($1, $2, $3, $4, null, null, null)" SD['ins_plan'] = plpy.prepare(q, ['text', 'text', 'text', 'text']) # shorter tags SD['op_map'] = {'INSERT': 'I', 'UPDATE': 'U', 'DELETE': 'D'} # quoting from psycopg import QuotedString def quote(s): if s is None: return "null" s = str(s) return str(QuotedString(s)) s = s.replace('\\', '\\\\') s = s.replace("'", "''") return "'%s'" % s # TableInfo class import re, urllib class TableInfo: func_rc = re.compile("([^(]+) [(] ([^)]+) [)]", re.I | re.X) def __init__(self, table_oid, options_txt): res = plpy.execute(SD['name_plan'], [table_oid]) self.name = res[0]['table_name'] self.parse_options(options_txt) self.load_pkey() def recheck(self, options_txt): if self.options_txt == options_txt: return self.parse_options(options_txt) self.load_pkey() def parse_options(self, options_txt): self.options = {'ret': 'OK'} if options_txt: for s in options_txt.split('&'): k, v = s.split('=', 1) self.options[k] = urllib.unquote_plus(v) self.options_txt = options_txt def load_pkey(self): self.pkey_list = [] if not 'pkey' in self.options: res = plpy.execute(SD['key_plan'], [table_oid]) for krow in res: col = krow['attname'] expr = col + "=%s" self.pkey_list.append( (col, expr) ) else: for a_pk in self.options['pkey'].split(','): m = self.func_rc.match(a_pk) if m: col = m.group(2) fn = m.group(1) expr = "%s(%s) = %s(%%s)" % (fn, col, fn) else: # normal case col = a_pk expr = col + "=%s" self.pkey_list.append( (col, expr) ) if len(self.pkey_list) == 0: plpy.error('sqltriga needs primary key on 
table') def get_insert_stmt(self, new): col_list = [] val_list = [] for k, v in new.items(): col_list.append(k) val_list.append(quote(v)) return "(%s) values (%s)" % (",".join(col_list), ",".join(val_list)) def get_update_stmt(self, old, new): chg_list = [] for k, v in new.items(): ov = old[k] if v == ov: continue chg_list.append("%s=%s" % (k, quote(v))) if len(chg_list) == 0: pk = self.pkey_list[0][0] chg_list.append("%s=%s" % (pk, quote(new[pk]))) return "%s where %s" % (",".join(chg_list), self.get_pkey_expr(new)) def get_pkey_expr(self, data): exp_list = [] for col, exp in self.pkey_list: exp_list.append(exp % quote(data[col])) return " and ".join(exp_list) SD['TableInfo'] = TableInfo # cache some functions def proc_insert(tbl): return tbl.get_insert_stmt(TD['new']) def proc_update(tbl): return tbl.get_update_stmt(TD['old'], TD['new']) def proc_delete(tbl): return tbl.get_pkey_expr(TD['old']) SD['event_func'] = { 'I': proc_insert, 'U': proc_update, 'D': proc_delete, } # remember init SD['init_done'] = 1 # load args table_oid = TD['relid'] queue_name = TD['args'][0] if len(TD['args']) > 1: options_str = TD['args'][1] else: options_str = '' # load & cache table data if table_oid in SD: tbl = SD[table_oid] tbl.recheck(options_str) else: tbl = SD['TableInfo'](table_oid, options_str) SD[table_oid] = tbl # generate payload op = SD['op_map'][TD['event']] data = SD['event_func'][op](tbl) # insert event plpy.execute(SD['ins_plan'], [queue_name, op, data, tbl.name]) # done return tbl.options['ret'] $$ language plpythonu; skytools-3.2.6/sql/pgq/old/pgq.insert_event_raw.sql0000644000000000000000000000625212426435645017350 0ustar create or replace function pgq.insert_event_raw( queue_name text, ev_id bigint, ev_time timestamptz, ev_owner integer, ev_retry integer, ev_type text, ev_data text, ev_extra1 text, ev_extra2 text, ev_extra3 text, ev_extra4 text) returns bigint as $$ # -- ---------------------------------------------------------------------- # -- Function: 
pgq.insert_event_raw(11) # -- # -- Deprecated function, replaced by C code in pgq_lowlevel.so. # -- # -- Actual event insertion. Used also by retry queue maintenance. # -- # -- Parameters: # -- queue_name - Name of the queue # -- ev_id - Event ID. If NULL, will be taken from seq. # -- ev_time - Event creation time. # -- ev_owner - Subscription ID when retry event. If NULL, the event is for everybody. # -- ev_retry - Retry count. NULL for first-time events. # -- ev_type - user data # -- ev_data - user data # -- ev_extra1 - user data # -- ev_extra2 - user data # -- ev_extra3 - user data # -- ev_extra4 - user data # -- # -- Returns: # -- Event ID. # -- ---------------------------------------------------------------------- # load args queue_name = args[0] ev_id = args[1] ev_time = args[2] ev_owner = args[3] ev_retry = args[4] ev_type = args[5] ev_data = args[6] ev_extra1 = args[7] ev_extra2 = args[8] ev_extra3 = args[9] ev_extra4 = args[10] if not "cf_plan" in SD: # get current event table q = "select queue_data_pfx, queue_cur_table, queue_event_seq "\ " from pgq.queue where queue_name = $1" SD["cf_plan"] = plpy.prepare(q, ["text"]) # get next id q = "select nextval($1) as id" SD["seq_plan"] = plpy.prepare(q, ["text"]) # get queue config res = plpy.execute(SD["cf_plan"], [queue_name]) if len(res) != 1: plpy.error("Unknown event queue: %s" % (queue_name)) tbl_prefix = res[0]["queue_data_pfx"] cur_nr = res[0]["queue_cur_table"] id_seq = res[0]["queue_event_seq"] # get id - bump seq even if id is given res = plpy.execute(SD['seq_plan'], [id_seq]) if ev_id is None: ev_id = res[0]["id"] # create plan for insertion ins_plan = None ins_key = "ins.%s" % (queue_name) if ins_key in SD: nr, ins_plan = SD[ins_key] if nr != cur_nr: ins_plan = None if ins_plan == None: q = "insert into %s_%s (ev_id, ev_time, ev_owner, ev_retry,"\ " ev_type, ev_data, ev_extra1, ev_extra2, ev_extra3, ev_extra4)"\ " values ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)" % ( tbl_prefix, cur_nr) types = 
["int8", "timestamptz", "int4", "int4", "text", "text", "text", "text", "text", "text"] ins_plan = plpy.prepare(q, types) SD[ins_key] = (cur_nr, ins_plan) # insert the event plpy.execute(ins_plan, [ev_id, ev_time, ev_owner, ev_retry, ev_type, ev_data, ev_extra1, ev_extra2, ev_extra3, ev_extra4]) # done return ev_id $$ language plpythonu; -- event inserting needs no special perms skytools-3.2.6/sql/pgq/old/pgq.logutriga.sql0000644000000000000000000000600512426435645015763 0ustar create or replace function pgq.logutriga() returns trigger as $$ # -- ---------------------------------------------------------------------- # -- Function: pgq.logutriga() # -- # -- Trigger function that puts row data urlencoded into queue. # -- # -- Trigger parameters: # -- arg1 - queue name # -- arg2 - optionally 'SKIP' # -- # -- Queue event fields: # -- ev_type - I/U/D # -- ev_data - column values urlencoded # -- ev_extra1 - table name # -- ev_extra2 - primary key columns # -- # -- Regular listen trigger example: # -- > CREATE TRIGGER triga_nimi AFTER INSERT OR UPDATE ON customer # -- > FOR EACH ROW EXECUTE PROCEDURE pgq.logutriga('qname'); # -- # -- Redirect trigger example: # -- > CREATE TRIGGER triga_nimi AFTER INSERT OR UPDATE ON customer # -- > FOR EACH ROW EXECUTE PROCEDURE pgq.logutriga('qname', 'SKIP'); # -- ---------------------------------------------------------------------- # this triger takes 1 or 2 args: # queue_name - destination queue # option return code (OK, SKIP) SKIP means op won't happen # copy-paste of db_urlencode from skytools.quoting from urllib import quote_plus def db_urlencode(dict): elem_list = [] for k, v in dict.items(): if v is None: elem = quote_plus(str(k)) else: elem = quote_plus(str(k)) + '=' + quote_plus(str(v)) elem_list.append(elem) return '&'.join(elem_list) # load args queue_name = TD['args'][0] if len(TD['args']) > 1: ret_code = TD['args'][1] else: ret_code = 'OK' table_oid = TD['relid'] # on first call init plans if not 'init_done' in SD: # find 
table name q = "SELECT n.nspname || '.' || c.relname AS table_name"\ " FROM pg_namespace n, pg_class c"\ " WHERE n.oid = c.relnamespace AND c.oid = $1" SD['name_plan'] = plpy.prepare(q, ['oid']) # find key columns q = "SELECT k.attname FROM pg_index i, pg_attribute k"\ " WHERE i.indrelid = $1 AND k.attrelid = i.indexrelid"\ " AND i.indisprimary AND k.attnum > 0 AND NOT k.attisdropped"\ " ORDER BY k.attnum" SD['key_plan'] = plpy.prepare(q, ['oid']) # insert data q = "SELECT pgq.insert_event($1, $2, $3, $4, $5, null, null)" SD['ins_plan'] = plpy.prepare(q, ['text', 'text', 'text', 'text', 'text']) # shorter tags SD['op_map'] = {'INSERT': 'I', 'UPDATE': 'U', 'DELETE': 'D'} # remember init SD['init_done'] = 1 # load & cache table data if table_oid in SD: tbl_name, tbl_keys = SD[table_oid] else: res = plpy.execute(SD['name_plan'], [table_oid]) tbl_name = res[0]['table_name'] res = plpy.execute(SD['key_plan'], [table_oid]) tbl_keys = ",".join(map(lambda x: x['attname'], res)) SD[table_oid] = (tbl_name, tbl_keys) # prepare args if TD['event'] == 'DELETE': data = db_urlencode(TD['old']) else: data = db_urlencode(TD['new']) # insert event plpy.execute(SD['ins_plan'], [ queue_name, SD['op_map'][TD['event']], data, tbl_name, tbl_keys]) # done return ret_code $$ language plpythonu; skytools-3.2.6/sql/pgq/expected/0000755000000000000000000000000012426435645013501 5ustar skytools-3.2.6/sql/pgq/expected/sqltriga.out0000644000000000000000000001261412426435645016064 0ustar \set VERBOSITY 'terse' set client_min_messages = 'warning'; -- start testing create table rtest ( id integer primary key, dat text ); create trigger rtest_triga after insert or update or delete on rtest for each row execute procedure pgq.sqltriga('que'); -- simple test insert into rtest values (1, 'value1'); WARNING: insert_event(que, I, (id,dat) values ('1','value1'), public.rtest) update rtest set dat = 'value2'; WARNING: insert_event(que, U, dat='value2' where id='1', public.rtest) delete from rtest; WARNING: 
insert_event(que, D, id='1', public.rtest) -- test new fields alter table rtest add column dat2 text; insert into rtest values (1, 'value1'); WARNING: insert_event(que, I, (id,dat,dat2) values ('1','value1',null), public.rtest) update rtest set dat = 'value2'; WARNING: insert_event(que, U, dat='value2' where id='1', public.rtest) delete from rtest; WARNING: insert_event(que, D, id='1', public.rtest) -- test field ignore drop trigger rtest_triga on rtest; create trigger rtest_triga after insert or update or delete on rtest for each row execute procedure pgq.sqltriga('que2', 'ignore=dat2'); insert into rtest values (1, '666', 'newdat'); WARNING: insert_event(que2, I, (id,dat) values ('1','666'), public.rtest) update rtest set dat = 5, dat2 = 'newdat2'; WARNING: insert_event(que2, U, dat='5' where id='1', public.rtest) update rtest set dat = 6; WARNING: insert_event(que2, U, dat='6' where id='1', public.rtest) delete from rtest; WARNING: insert_event(que2, D, id='1', public.rtest) -- test hashed pkey -- drop trigger rtest_triga on rtest; -- create trigger rtest_triga after insert or update or delete on rtest -- for each row execute procedure pgq.sqltriga('que2', 'ignore=dat2','pkey=dat,hashtext(dat)'); -- insert into rtest values (1, '666', 'newdat'); -- update rtest set dat = 5, dat2 = 'newdat2'; -- update rtest set dat = 6; -- delete from rtest; -- test wrong key drop trigger rtest_triga on rtest; create trigger rtest_triga after insert or update or delete on rtest for each row execute procedure pgq.sqltriga('que3'); insert into rtest values (1, 0, 'non-null'); WARNING: insert_event(que3, I, (id,dat,dat2) values ('1','0','non-null'), public.rtest) insert into rtest values (2, 0, NULL); WARNING: insert_event(que3, I, (id,dat,dat2) values ('2','0',null), public.rtest) update rtest set dat2 = 'non-null2' where id=1; WARNING: insert_event(que3, U, dat2='non-null2' where id='1', public.rtest) update rtest set dat2 = NULL where id=1; WARNING: insert_event(que3, U, 
dat2=NULL where id='1', public.rtest) update rtest set dat2 = 'new-nonnull' where id=2; WARNING: insert_event(que3, U, dat2='new-nonnull' where id='2', public.rtest) delete from rtest where id=1; WARNING: insert_event(que3, D, id='1', public.rtest) delete from rtest where id=2; WARNING: insert_event(que3, D, id='2', public.rtest) -- test missing pkey create table nopkey (dat text); create trigger nopkey_triga after insert or update or delete on nopkey for each row execute procedure pgq.sqltriga('que3'); insert into nopkey values ('foo'); WARNING: insert_event(que3, I, (dat) values ('foo'), public.nopkey) update nopkey set dat = 'bat'; ERROR: Update/Delete on table without pkey delete from nopkey; ERROR: Update/Delete on table without pkey -- test custom pkey create table custom_pkey (dat1 text not null, dat2 int2 not null, dat3 text); create trigger custom_triga after insert or update or delete on custom_pkey for each row execute procedure pgq.sqltriga('que3', 'pkey=dat1,dat2'); insert into custom_pkey values ('foo', '2'); WARNING: insert_event(que3, I, (dat1,dat2,dat3) values ('foo','2',null), public.custom_pkey) update custom_pkey set dat3 = 'bat'; WARNING: insert_event(que3, U, dat3='bat' where dat1='foo' and dat2='2', public.custom_pkey) delete from custom_pkey; WARNING: insert_event(que3, D, dat1='foo' and dat2='2', public.custom_pkey) -- test custom fields create table custom_fields ( dat1 text not null primary key, dat2 int2 not null, dat3 text, _pgq_ev_type text default 'my_type', _pgq_ev_extra1 text default 'e1', _pgq_ev_extra2 text default 'e2', _pgq_ev_extra3 text default 'e3', _pgq_ev_extra4 text default 'e4' ); create trigger customf_triga after insert or update or delete on custom_fields for each row execute procedure pgq.sqltriga('que3'); insert into custom_fields values ('foo', '2'); WARNING: insert_event(que3, my_type, (dat1,dat2,dat3) values ('foo','2',null), e1) update custom_fields set dat3 = 'bat'; WARNING: insert_event(que3, my_type, 
dat3='bat' where dat1='foo', e1) delete from custom_fields; WARNING: insert_event(que3, my_type, dat1='foo', e1) -- test custom expression create table custom_expr ( dat1 text not null primary key, dat2 int2 not null, dat3 text ); create trigger customex_triga after insert or update or delete on custom_expr for each row execute procedure pgq.sqltriga('que3', 'ev_extra1=''test='' || dat1', 'ev_type=dat3'); insert into custom_expr values ('foo', '2'); WARNING: insert_event(que3, , (dat1,dat2,dat3) values ('foo','2',null), test=foo) update custom_expr set dat3 = 'bat'; WARNING: insert_event(que3, bat, dat3='bat' where dat1='foo', test=foo) delete from custom_expr; WARNING: insert_event(que3, bat, dat1='foo', test=foo) -- test pk update insert into rtest values (1, 'value1'); WARNING: insert_event(que3, I, (id,dat,dat2) values ('1','value1',null), public.rtest) update rtest set id = 2; ERROR: primary key update not allowed skytools-3.2.6/sql/pgq/expected/pgq_init_noext.out0000644000000000000000000000011212426435645017253 0ustar \set ECHO none upgrade_schema ---------------- 0 (1 row) skytools-3.2.6/sql/pgq/expected/pgq_core.out0000644000000000000000000003004112426435645016027 0ustar \set VERBOSITY 'terse' set client_min_messages = 'warning'; select * from pgq.maint_tables_to_vacuum(); maint_tables_to_vacuum ------------------------ (0 rows) select * from pgq.maint_retry_events(); maint_retry_events -------------------- 0 (1 row) select pgq.create_queue('tmpqueue'); create_queue -------------- 1 (1 row) select pgq.register_consumer('tmpqueue', 'consumer'); register_consumer ------------------- 1 (1 row) select pgq.unregister_consumer('tmpqueue', 'consumer'); unregister_consumer --------------------- 1 (1 row) select pgq.drop_queue('tmpqueue'); drop_queue ------------ 1 (1 row) select pgq.create_queue('myqueue'); create_queue -------------- 1 (1 row) select pgq.register_consumer('myqueue', 'consumer'); register_consumer ------------------- 1 (1 row) update pgq.queue set 
queue_ticker_max_lag = '0', queue_ticker_idle_period = '0'; select pgq.next_batch('myqueue', 'consumer'); next_batch ------------ (1 row) select pgq.next_batch('myqueue', 'consumer'); next_batch ------------ (1 row) select pgq.ticker(); ticker -------- 1 (1 row) select pgq.next_batch('myqueue', 'consumer'); next_batch ------------ 1 (1 row) select pgq.next_batch('myqueue', 'consumer'); next_batch ------------ 1 (1 row) select queue_name, consumer_name, prev_tick_id, tick_id, lag < '30 seconds' as lag_exists from pgq.get_batch_info(1); queue_name | consumer_name | prev_tick_id | tick_id | lag_exists ------------+---------------+--------------+---------+------------ myqueue | consumer | 1 | 2 | t (1 row) select queue_name, queue_ntables, queue_cur_table, queue_rotation_period, queue_switch_time <= now() as switch_time_exists, queue_external_ticker, queue_ticker_max_count, queue_ticker_max_lag, queue_ticker_idle_period, ticker_lag < '2 hours' as ticker_lag_exists, last_tick_id from pgq.get_queue_info() order by 1; queue_name | queue_ntables | queue_cur_table | queue_rotation_period | switch_time_exists | queue_external_ticker | queue_ticker_max_count | queue_ticker_max_lag | queue_ticker_idle_period | ticker_lag_exists | last_tick_id ------------+---------------+-----------------+-----------------------+--------------------+-----------------------+------------------------+----------------------+--------------------------+-------------------+-------------- myqueue | 3 | 0 | @ 2 hours | t | f | 500 | @ 0 | @ 0 | t | 2 (1 row) select queue_name, consumer_name, lag < '30 seconds' as lag_exists, last_seen < '30 seconds' as last_seen_exists, last_tick, current_batch, next_tick from pgq.get_consumer_info() order by 1, 2; queue_name | consumer_name | lag_exists | last_seen_exists | last_tick | current_batch | next_tick ------------+---------------+------------+------------------+-----------+---------------+----------- myqueue | consumer | t | t | 1 | 1 | 2 (1 row) select 
pgq.finish_batch(1); finish_batch -------------- 1 (1 row) select pgq.finish_batch(1); WARNING: finish_batch: batch 1 not found finish_batch -------------- 0 (1 row) select pgq.ticker(); ticker -------- 1 (1 row) select pgq.next_batch('myqueue', 'consumer'); next_batch ------------ 2 (1 row) select * from pgq.batch_event_tables(2); batch_event_tables -------------------- pgq.event_2_0 (1 row) select * from pgq.get_batch_events(2); ev_id | ev_time | ev_txid | ev_retry | ev_type | ev_data | ev_extra1 | ev_extra2 | ev_extra3 | ev_extra4 -------+---------+---------+----------+---------+---------+-----------+-----------+-----------+----------- (0 rows) select pgq.finish_batch(2); finish_batch -------------- 1 (1 row) select pgq.insert_event('myqueue', 'r1', 'data'); insert_event -------------- 1 (1 row) select pgq.insert_event('myqueue', 'r2', 'data', 'extra1', 'extra2', 'extra3', 'extra4'); insert_event -------------- 2 (1 row) select pgq.insert_event('myqueue', 'r3', 'data'); insert_event -------------- 3 (1 row) select pgq.current_event_table('myqueue'); current_event_table --------------------- pgq.event_2_0 (1 row) select pgq.ticker(); ticker -------- 1 (1 row) select * from pgq.next_batch_custom('myqueue', 'consumer', '1 hour', null, null); batch_id | cur_tick_id | prev_tick_id | cur_tick_time | prev_tick_time | cur_tick_event_seq | prev_tick_event_seq ----------+-------------+--------------+---------------+----------------+--------------------+--------------------- | | | | | | (1 row) select * from pgq.next_batch_custom('myqueue', 'consumer', null, 10000, null); batch_id | cur_tick_id | prev_tick_id | cur_tick_time | prev_tick_time | cur_tick_event_seq | prev_tick_event_seq ----------+-------------+--------------+---------------+----------------+--------------------+--------------------- | | | | | | (1 row) select * from pgq.next_batch_custom('myqueue', 'consumer', null, null, '10 minutes'); batch_id | cur_tick_id | prev_tick_id | cur_tick_time | prev_tick_time | 
cur_tick_event_seq | prev_tick_event_seq ----------+-------------+--------------+---------------+----------------+--------------------+--------------------- | | | | | | (1 row) select pgq.next_batch('myqueue', 'consumer'); next_batch ------------ 3 (1 row) select ev_id,ev_retry,ev_type,ev_data,ev_extra1,ev_extra2,ev_extra3,ev_extra4 from pgq.get_batch_events(3); ev_id | ev_retry | ev_type | ev_data | ev_extra1 | ev_extra2 | ev_extra3 | ev_extra4 -------+----------+---------+---------+-----------+-----------+-----------+----------- 1 | | r1 | data | | | | 2 | | r2 | data | extra1 | extra2 | extra3 | extra4 3 | | r3 | data | | | | (3 rows) begin; select ev_id,ev_retry,ev_type,ev_data,ev_extra1,ev_extra2,ev_extra3,ev_extra4 from pgq.get_batch_cursor(3, 'acurs', 10); ev_id | ev_retry | ev_type | ev_data | ev_extra1 | ev_extra2 | ev_extra3 | ev_extra4 -------+----------+---------+---------+-----------+-----------+-----------+----------- 1 | | r1 | data | | | | 2 | | r2 | data | extra1 | extra2 | extra3 | extra4 3 | | r3 | data | | | | (3 rows) close acurs; select ev_id,ev_retry,ev_type,ev_data,ev_extra1,ev_extra2,ev_extra3,ev_extra4 from pgq.get_batch_cursor(3, 'acurs', 2); ev_id | ev_retry | ev_type | ev_data | ev_extra1 | ev_extra2 | ev_extra3 | ev_extra4 -------+----------+---------+---------+-----------+-----------+-----------+----------- 1 | | r1 | data | | | | 2 | | r2 | data | extra1 | extra2 | extra3 | extra4 (2 rows) close acurs; select ev_id,ev_retry,ev_type,ev_data,ev_extra1,ev_extra2,ev_extra3,ev_extra4 from pgq.get_batch_cursor(3, 'acurs', 2, 'ev_id = 1'); ev_id | ev_retry | ev_type | ev_data | ev_extra1 | ev_extra2 | ev_extra3 | ev_extra4 -------+----------+---------+---------+-----------+-----------+-----------+----------- 1 | | r1 | data | | | | (1 row) close acurs; end; select pgq.event_retry(3, 2, 0); event_retry ------------- 1 (1 row) select pgq.batch_retry(3, 0); batch_retry ------------- 2 (1 row) select pgq.finish_batch(3); finish_batch 
-------------- 1 (1 row) select pgq.event_retry_raw('myqueue', 'consumer', now(), 666, now(), 0, 'rawtest', 'data', null, null, null, null); event_retry_raw ----------------- 666 (1 row) select pgq.ticker(); ticker -------- 1 (1 row) -- test maint update pgq.queue set queue_rotation_period = '0 seconds'; select queue_name, pgq.maint_rotate_tables_step1(queue_name) from pgq.queue; queue_name | maint_rotate_tables_step1 ------------+--------------------------- myqueue | 0 (1 row) select pgq.maint_rotate_tables_step2(); maint_rotate_tables_step2 --------------------------- 0 (1 row) -- test extra select nextval(queue_event_seq) from pgq.queue where queue_name = 'myqueue'; nextval --------- 4 (1 row) select pgq.force_tick('myqueue'); force_tick ------------ 5 (1 row) select nextval(queue_event_seq) from pgq.queue where queue_name = 'myqueue'; nextval --------- 2006 (1 row) create sequence tmptest_seq; select pgq.seq_getval('tmptest_seq'); seq_getval ------------ 1 (1 row) select pgq.seq_setval('tmptest_seq', 10); seq_setval ------------ 10 (1 row) select pgq.seq_setval('tmptest_seq', 5); seq_setval ------------ 10 (1 row) select pgq.seq_setval('tmptest_seq', 15); seq_setval ------------ 15 (1 row) select pgq.seq_getval('tmptest_seq'); seq_getval ------------ 15 (1 row) -- test disabled select pgq.insert_event('myqueue', 'test', 'event'); insert_event -------------- 2007 (1 row) update pgq.queue set queue_disable_insert = true where queue_name = 'myqueue'; select pgq.insert_event('myqueue', 'test', 'event'); ERROR: Insert into queue disallowed update pgq.queue set queue_disable_insert = false where queue_name = 'myqueue'; select pgq.insert_event('myqueue', 'test', 'event'); insert_event -------------- 2009 (1 row) -- test limit update pgq.queue set queue_per_tx_limit = 2 where queue_name = 'myqueue'; begin; select pgq.insert_event('myqueue', 'test', 'event1'); insert_event -------------- 2010 (1 row) select pgq.insert_event('myqueue', 'test', 'event2'); insert_event 
-------------- 2011 (1 row) select pgq.insert_event('myqueue', 'test', 'event3'); ERROR: Queue 'myqueue' allows max 2 events from one TX end; update pgq.queue set queue_per_tx_limit = 0 where queue_name = 'myqueue'; begin; select pgq.insert_event('myqueue', 'test', 'event1'); ERROR: Queue 'myqueue' allows max 0 events from one TX select pgq.insert_event('myqueue', 'test', 'event2'); ERROR: current transaction is aborted, commands ignored until end of transaction block select pgq.insert_event('myqueue', 'test', 'event3'); ERROR: current transaction is aborted, commands ignored until end of transaction block end; update pgq.queue set queue_per_tx_limit = null where queue_name = 'myqueue'; begin; select pgq.insert_event('myqueue', 'test', 'event1'); insert_event -------------- 2014 (1 row) select pgq.insert_event('myqueue', 'test', 'event2'); insert_event -------------- 2015 (1 row) select pgq.insert_event('myqueue', 'test', 'event3'); insert_event -------------- 2016 (1 row) end; select * from pgq.maint_operations(); func_name | func_arg -------------------------------+---------- pgq.maint_rotate_tables_step1 | myqueue pgq.maint_rotate_tables_step2 | (2 rows) alter table pgq.queue add column queue_extra_maint text[]; select * from pgq.maint_operations(); func_name | func_arg -------------------------------+---------- pgq.maint_rotate_tables_step1 | myqueue pgq.maint_rotate_tables_step2 | (2 rows) update pgq.queue set queue_extra_maint = array['baz', 'foo.bar']; select * from pgq.maint_operations(); func_name | func_arg -------------------------------+---------- pgq.maint_rotate_tables_step1 | myqueue pgq.maint_rotate_tables_step2 | baz | myqueue foo.bar | myqueue (4 rows) skytools-3.2.6/sql/pgq/expected/logutriga.out0000644000000000000000000001305612426435645016234 0ustar \set VERBOSITY 'terse' set client_min_messages = 'warning'; select 1 from (select set_config(name, 'escape', false) as ignore from pg_settings where name = 'bytea_output') x where x.ignore = 'foo'; 
?column? ---------- (0 rows) create or replace function pgq.insert_event(queue_name text, ev_type text, ev_data text, ev_extra1 text, ev_extra2 text, ev_extra3 text, ev_extra4 text) returns bigint as $$ begin raise warning 'insert_event(%, %, %, %)', queue_name, ev_type, ev_data, ev_extra1; return 1; end; $$ language plpgsql; create table udata ( id serial primary key, txt text, bin bytea ); create trigger utest AFTER insert or update or delete ON udata for each row execute procedure pgq.logutriga('udata_que'); insert into udata (txt) values ('text1'); WARNING: insert_event(udata_que, I:id, id=1&txt=text1&bin, public.udata) insert into udata (bin) values (E'bi\tn\\000bin'); WARNING: insert_event(udata_que, I:id, id=2&txt&bin=bi%5c011n%5c000bin, public.udata) -- test ignore drop trigger utest on udata; truncate udata; create trigger utest after insert or update or delete on udata for each row execute procedure pgq.logutriga('udata_que', 'ignore=bin'); insert into udata values (1, 'txt', 'bin'); WARNING: insert_event(udata_que, I:id, id=1&txt=txt, public.udata) update udata set txt = 'txt'; WARNING: insert_event(udata_que, U:id, id=1&txt=txt, public.udata) update udata set txt = 'txt2', bin = 'bin2'; WARNING: insert_event(udata_que, U:id, id=1&txt=txt2, public.udata) update udata set bin = 'bin3'; delete from udata; WARNING: insert_event(udata_que, D:id, id=1&txt=txt2, public.udata) -- test missing pkey create table nopkey2 (dat text); create trigger nopkey_triga2 after insert or update or delete on nopkey2 for each row execute procedure pgq.logutriga('que3'); insert into nopkey2 values ('foo'); WARNING: insert_event(que3, I:, dat=foo, public.nopkey2) update nopkey2 set dat = 'bat'; ERROR: Update/Delete on table without pkey delete from nopkey2; ERROR: Update/Delete on table without pkey -- test custom pkey create table ucustom_pkey (dat1 text not null, dat2 int2 not null, dat3 text); create trigger ucustom_triga after insert or update or delete on ucustom_pkey --for 
each row execute procedure pgq.logutriga('que3', 'pkey=dat1,dat2'); for each row execute procedure pgq.logutriga('que3'); insert into ucustom_pkey values ('foo', '2'); WARNING: insert_event(que3, I:, dat1=foo&dat2=2&dat3, public.ucustom_pkey) update ucustom_pkey set dat3 = 'bat'; ERROR: Update/Delete on table without pkey delete from ucustom_pkey; ERROR: Update/Delete on table without pkey -- test custom fields create table custom_fields2 ( dat1 text not null primary key, dat2 int2 not null, dat3 text, _pgq_ev_type text default 'my_type', _pgq_ev_extra1 text default 'e1', _pgq_ev_extra2 text default 'e2', _pgq_ev_extra3 text default 'e3', _pgq_ev_extra4 text default 'e4' ); create trigger customf2_triga after insert or update or delete on custom_fields2 for each row execute procedure pgq.logutriga('que3'); insert into custom_fields2 values ('foo', '2'); WARNING: insert_event(que3, my_type, dat1=foo&dat2=2&dat3, e1) update custom_fields2 set dat3 = 'bat'; WARNING: insert_event(que3, my_type, dat1=foo&dat2=2&dat3=bat, e1) delete from custom_fields2; WARNING: insert_event(que3, my_type, dat1=foo&dat2=2&dat3=bat, e1) -- test custom expression create table custom_expr2 ( dat1 text not null primary key, dat2 int2 not null, dat3 text ); create trigger customex2_triga after insert or update or delete on custom_expr2 for each row execute procedure pgq.logutriga('que3', 'ev_extra1=''test='' || dat1', 'ev_type=dat3'); insert into custom_expr2 values ('foo', '2'); WARNING: insert_event(que3, , dat1=foo&dat2=2&dat3, test=foo) update custom_expr2 set dat3 = 'bat'; WARNING: insert_event(que3, bat, dat1=foo&dat2=2&dat3=bat, test=foo) delete from custom_expr2; WARNING: insert_event(que3, bat, dat1=foo&dat2=2&dat3=bat, test=foo) -- test when= create table when_test ( dat1 text not null primary key, dat2 int2 not null, dat3 text ); create trigger when_triga after insert or update or delete on when_test for each row execute procedure pgq.logutriga('que3', 'when=dat1=''foo'''); insert 
into when_test values ('foo', '2'); WARNING: insert_event(que3, I:dat1, dat1=foo&dat2=2&dat3, public.when_test) insert into when_test values ('bar', '2'); select * from when_test; dat1 | dat2 | dat3 ------+------+------ foo | 2 | bar | 2 | (2 rows) update when_test set dat3 = 'bat'; WARNING: insert_event(que3, U:dat1, dat1=foo&dat2=2&dat3=bat, public.when_test) delete from when_test; WARNING: insert_event(que3, D:dat1, dat1=foo&dat2=2&dat3=bat, public.when_test) drop trigger when_triga on when_test; create trigger when_triga after insert or update or delete on when_test for each row execute procedure pgq.logutriga('que3', 'when=current_user=''random'''); insert into when_test values ('foo', '2'); select * from when_test; dat1 | dat2 | dat3 ------+------+------ foo | 2 | (1 row) -- test deny create table deny_test ( dat1 text not null primary key, dat2 text ); create trigger deny_triga after insert or update or delete on deny_test for each row execute procedure pgq.logutriga('noqueue', 'deny'); insert into deny_test values ('1', '2'); ERROR: Table 'public.deny_test' to queue 'noqueue': change not allowed (I) -- test pk update insert into udata (id, txt) values (1, 'txt'); WARNING: insert_event(udata_que, I:id, id=1&txt=txt, public.udata) update udata set id = 2; ERROR: primary key update not allowed skytools-3.2.6/sql/pgq/expected/pgq_init_upgrade_1.out0000644000000000000000000000020512426435645017770 0ustar \set ECHO none upgrade_schema ---------------- 0 (1 row) upgrade_schema ---------------- 3 (1 row) skytools-3.2.6/sql/pgq/expected/clean.out0000644000000000000000000000067612426435645015325 0ustar \set VERBOSITY 'terse' set client_min_messages = 'warning'; drop schema pgq cascade; drop sequence tmptest_seq; drop table custom_expr; drop table custom_expr2; drop table custom_fields; drop table custom_fields2; drop table custom_pkey; drop table deny_test; drop table nopkey; drop table nopkey2; drop table rtest; drop table if exists trunctrg1; drop table if exists 
trunctrg2; drop table ucustom_pkey; drop table udata; drop table when_test; skytools-3.2.6/sql/pgq/expected/trunctrg.out0000644000000000000000000000163512426435645016107 0ustar \set VERBOSITY 'terse' set client_min_messages = 'warning'; -- test sqltriga truncate create table trunctrg1 ( dat1 text not null primary key, dat2 int2 not null, dat3 text ); create trigger trunc1_trig after truncate on trunctrg1 for each statement execute procedure pgq.sqltriga('que3'); truncate trunctrg1; WARNING: insert_event(que3, R, , public.trunctrg1) -- test logutriga truncate create table trunctrg2 ( dat1 text not null primary key, dat2 int2 not null, dat3 text ); create trigger trunc2_trig after truncate on trunctrg2 for each statement execute procedure pgq.logutriga('que3'); truncate trunctrg2; WARNING: insert_event(que3, R, , public.trunctrg2) -- test deny create trigger deny_triga2 after truncate on trunctrg2 for each statement execute procedure pgq.logutriga('noqueue', 'deny'); truncate trunctrg2; ERROR: Table 'public.trunctrg2' to queue 'noqueue': change not allowed (R) skytools-3.2.6/sql/pgq/expected/pgq_init_upgrade.out0000644000000000000000000000020512426435645017550 0ustar \set ECHO none upgrade_schema ---------------- 0 (1 row) upgrade_schema ---------------- 0 (1 row) skytools-3.2.6/sql/pgq/expected/pgq_perms.out0000644000000000000000000000235712426435645016236 0ustar \set ECHO off drop role if exists pgq_test_producer; drop role if exists pgq_test_consumer; drop role if exists pgq_test_admin; create role pgq_test_consumer with login in role pgq_reader; create role pgq_test_producer with login in role pgq_writer; create role pgq_test_admin with login in role pgq_admin; \c - pgq_test_admin select * from pgq.create_queue('pqueue'); -- ok create_queue -------------- 1 (1 row) \c - pgq_test_producer select * from pgq.create_queue('pqueue'); -- fail ERROR: permission denied for function create_queue select * from pgq.insert_event('pqueue', 'test', 'data'); -- ok insert_event 
-------------- 1 (1 row) select * from pgq.register_consumer('pqueue', 'prod'); -- fail ERROR: permission denied for function register_consumer \c - pgq_test_consumer select * from pgq.create_queue('pqueue'); -- fail ERROR: permission denied for function create_queue select * from pgq.insert_event('pqueue', 'test', 'data'); -- fail ERROR: permission denied for function insert_event select * from pgq.register_consumer('pqueue', 'cons'); -- ok register_consumer ------------------- 1 (1 row) select * from pgq.next_batch('pqueue', 'cons'); -- ok next_batch ------------ (1 row) skytools-3.2.6/sql/pgq/expected/pgq_init_ext.out0000644000000000000000000000155312426435645016730 0ustar -- create noext schema \set ECHO none upgrade_schema ---------------- 0 (1 row) create_queue -------------- 1 (1 row) -- convert to extension create extension pgq from 'unpackaged'; select array_length(extconfig, 1) from pg_catalog.pg_extension where extname = 'pgq'; array_length -------------- 7 (1 row) select pgq.create_queue('testqueue2'); create_queue -------------- 1 (1 row) --drop extension pgq; -- will fail select pgq.drop_queue('testqueue2'); drop_queue ------------ 1 (1 row) select pgq.drop_queue('testqueue1'); drop_queue ------------ 1 (1 row) -- drop schema failure drop extension pgq; -- create clean schema create extension pgq; select array_length(extconfig, 1) from pg_catalog.pg_extension where extname = 'pgq'; array_length -------------- 7 (1 row) skytools-3.2.6/sql/pgq/triggers/0000755000000000000000000000000012426435645013526 5ustar skytools-3.2.6/sql/pgq/triggers/qbuilder.c0000644000000000000000000000627212426435645015510 0ustar #include #include #include "qbuilder.h" #include "parsesql.h" /* import standard_conforming_strings */ #if PG_VERSION_NUM >= 80500 #include #else #ifndef PGDLLIMPORT #define PGDLLIMPORT DLLIMPORT #endif extern PGDLLIMPORT bool standard_conforming_strings; #endif /* create QB in right context */ struct QueryBuilder *qb_create(const struct 
QueryBuilderOps *ops, MemoryContext ctx) { struct QueryBuilder *q; q = MemoryContextAllocZero(ctx, sizeof(*q)); q->op = ops; q->stdstr = standard_conforming_strings; q->maxargs = 8; q->arg_map = MemoryContextAlloc(ctx, q->maxargs * sizeof(int)); /* default size too large? */ q->sql.maxlen = 64; q->sql.data = MemoryContextAlloc(ctx, q->sql.maxlen); q->sql.data[0] = 0; return q; } /* add fragment without parsing */ void qb_add_raw(struct QueryBuilder *q, const char *str, int len) { if (len < 0) len = strlen(str); appendBinaryStringInfo(&q->sql, str, len); } /* the ident may or may not be argument reference */ static void qb_handle_ident(struct QueryBuilder *q, const char *ident, int len, void *arg) { int real_idx; int local_idx = -1, i; char abuf[32]; /* is argument reference? */ real_idx = q->op->name_lookup(arg, ident, len); if (real_idx < 0) { qb_add_raw(q, ident, len); return; } /* already referenced? */ for (i = 0; i < q->nargs; i++) { if (q->arg_map[i] == real_idx) { local_idx = i; break; } } /* new referece? 
*/ if (local_idx < 0) { if (q->nargs >= FUNC_MAX_ARGS) elog(ERROR, "Too many args"); if (q->nargs >= q->maxargs) { q->arg_map = repalloc(q->arg_map, q->maxargs * 2 * sizeof(int)); q->maxargs *= 2; } local_idx = q->nargs++; q->arg_map[local_idx] = real_idx; } /* add $n to query */ snprintf(abuf, sizeof(abuf), "$%d", local_idx + 1); return qb_add_raw(q, abuf, strlen(abuf)); } /* add fragment with parsing - argument references are replaced with $n */ void qb_add_parse(struct QueryBuilder *q, const char *sql, void *arg) { int tlen, tok; /* tokenize sql, pick out argument references */ while (1) { tok = sql_tokenizer(sql, &tlen, q->stdstr); if (!tok) break; if (tok < 0) elog(ERROR, "QB: syntax error"); if (tok == T_WORD) { qb_handle_ident(q, sql, tlen, arg); } else { qb_add_raw(q, sql, tlen); } sql += tlen; } } /* prepare */ void qb_prepare(struct QueryBuilder *q, void *arg) { Oid types[FUNC_MAX_ARGS]; void *plan; int i; for (i = 0; i < q->nargs; i++) types[i] = q->op->type_lookup(arg, q->arg_map[i]); plan = SPI_prepare(q->sql.data, q->nargs, types); q->plan = SPI_saveplan(plan); } /* lookup values and run plan. returns result from SPI_execute_plan() */ int qb_execute(struct QueryBuilder *q, void *arg) { Datum values[FUNC_MAX_ARGS]; char nulls[FUNC_MAX_ARGS]; int i; if (!q->plan) elog(ERROR, "QB: query not prepared yet"); for (i = 0; i < q->nargs; i++) { bool isnull = false; values[i] = q->op->value_lookup(arg, q->arg_map[i], &isnull); nulls[i] = isnull ? 
'n' : ' '; } return SPI_execute_plan(q->plan, values, nulls, true, 0); } void qb_free(struct QueryBuilder *q) { if (!q) return; if (q->plan) SPI_freeplan(q->plan); if (q->sql.data) pfree(q->sql.data); pfree(q); } skytools-3.2.6/sql/pgq/triggers/stringutil.c0000644000000000000000000001354212426435645016103 0ustar /* * stringutil.c - some tools for string handling * * Copyright (c) 2007 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include #include "stringutil.h" #ifndef SET_VARSIZE #define SET_VARSIZE(x, len) VARATT_SIZEP(x) = len #endif StringInfo pgq_init_varbuf(void) { StringInfo buf; buf = makeStringInfo(); appendStringInfoString(buf, "XXXX"); return buf; } Datum pgq_finish_varbuf(StringInfo buf) { if (!buf) return (Datum)0; SET_VARSIZE(buf->data, buf->len); return PointerGetDatum(buf->data); } /* * Find a string in comma-separated list. * * It does not support space inside tokens. 
*/ bool pgq_strlist_contains(const char *liststr, const char *str) { int c, len = strlen(str); const char *p, *listpos = liststr; loop: /* find string fragment, later check if actual token */ p = strstr(listpos, str); if (p == NULL) return false; /* move listpos further */ listpos = p + len; /* survive len=0 and avoid unneccesary compare */ if (*listpos) listpos++; /* check previous symbol */ if (p > liststr) { c = *(p - 1); if (!isspace(c) && c != ',') goto loop; } /* check following symbol */ c = p[len]; if (c != 0 && !isspace(c) && c != ',') goto loop; return true; } /* * quoting */ static int pgq_urlencode(char *dst, const uint8 *src, int srclen) { static const char hextbl[] = "0123456789abcdef"; const uint8 *end = src + srclen; char *p = dst; while (src < end) { unsigned c = *src++; if (c == ' ') { *p++ = '+'; } else if ((c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || c == '_' || c == '.' || c == '-') { *p++ = c; } else { *p++ = '%'; *p++ = hextbl[c >> 4]; *p++ = hextbl[c & 15]; } } return p - dst; } static int pgq_quote_literal(char *dst, const uint8 *src, int srclen) { const uint8 *cp1 = src, *src_end = src + srclen; char *cp2 = dst; bool is_ext = false; *cp2++ = '\''; while (cp1 < src_end) { int wl = pg_mblen((const char *)cp1); if (wl != 1) { while (wl-- > 0 && cp1 < src_end) *cp2++ = *cp1++; continue; } if (*cp1 == '\'') { *cp2++ = '\''; } else if (*cp1 == '\\') { if (!is_ext) { /* make room for 'E' */ memmove(dst + 1, dst, cp2 - dst); *dst = 'E'; is_ext = true; cp2++; } *cp2++ = '\\'; } *cp2++ = *cp1++; } *cp2++ = '\''; return cp2 - dst; } /* check if ident is keyword that needs quoting */ static bool is_keyword(const char *ident) { const ScanKeyword *kw; /* do the lookup */ #if PG_VERSION_NUM >= 80500 kw = ScanKeywordLookup(ident, ScanKeywords, NumScanKeywords); #else kw = ScanKeywordLookup(ident); #endif /* unreserved? 
*/ #if PG_VERSION_NUM >= 80300 if (kw && kw->category == UNRESERVED_KEYWORD) return false; #endif /* found anything? */ return kw != NULL; } /* * pgq_quote_ident - Quote an identifier only if needed */ static int pgq_quote_ident(char *dst, const uint8 *src, int srclen) { /* * Can avoid quoting if ident starts with a lowercase letter or * underscore and contains only lowercase letters, digits, and * underscores, *and* is not any SQL keyword. Otherwise, supply * quotes. */ int nquotes = 0; bool safe; const char *ptr; char *optr; char ident[NAMEDATALEN + 1]; /* expect idents be not bigger than NAMEDATALEN */ if (srclen > NAMEDATALEN) srclen = NAMEDATALEN; memcpy(ident, src, srclen); ident[srclen] = 0; /* * would like to use macros here, but they might yield * unwanted locale-specific results... */ safe = ((ident[0] >= 'a' && ident[0] <= 'z') || ident[0] == '_'); for (ptr = ident; *ptr; ptr++) { char ch = *ptr; if ((ch >= 'a' && ch <= 'z') || (ch >= '0' && ch <= '9') || (ch == '_')) continue; /* okay */ safe = false; if (ch == '"') nquotes++; } if (safe) { if (is_keyword(ident)) safe = false; } optr = dst; if (!safe) *optr++ = '"'; for (ptr = ident; *ptr; ptr++) { char ch = *ptr; if (ch == '"') *optr++ = '"'; *optr++ = ch; } if (!safe) *optr++ = '"'; return optr - dst; } static char *start_append(StringInfo buf, int alloc_len) { enlargeStringInfo(buf, alloc_len); return buf->data + buf->len; } static void finish_append(StringInfo buf, int final_len) { if (buf->len + final_len > buf->maxlen) elog(FATAL, "buffer overflow"); buf->len += final_len; } static void tbuf_encode_data(StringInfo buf, const uint8 *data, int len, enum PgqEncode encoding) { int dlen = 0; char *dst; switch (encoding) { case TBUF_QUOTE_LITERAL: dst = start_append(buf, len * 2 + 3); dlen = pgq_quote_literal(dst, data, len); break; case TBUF_QUOTE_IDENT: dst = start_append(buf, len * 2 + 2); dlen = pgq_quote_ident(dst, data, len); break; case TBUF_QUOTE_URLENC: dst = start_append(buf, len * 3 + 2); 
dlen = pgq_urlencode(dst, data, len); break; default: elog(ERROR, "bad encoding"); } finish_append(buf, dlen); } void pgq_encode_cstring(StringInfo tbuf, const char *str, enum PgqEncode encoding) { if (str == NULL) elog(ERROR, "tbuf_encode_cstring: NULL"); tbuf_encode_data(tbuf, (const uint8 *)str, strlen(str), encoding); } skytools-3.2.6/sql/pgq/triggers/sqltriga.c0000644000000000000000000000460112426435645015521 0ustar /* * sqltriga.c - Smart SQL-logging trigger. * * Copyright (c) 2007 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include "common.h" #include "stringutil.h" PG_FUNCTION_INFO_V1(pgq_sqltriga); Datum pgq_sqltriga(PG_FUNCTION_ARGS); /* * PgQ log trigger, takes 2 arguments: * 1. queue name to be inserted to. 
* * Queue events will be in format: * ev_type - operation type, I/U/D/R * ev_data - urlencoded column values * ev_extra1 - table name * ev_extra2 - optional urlencoded backup */ Datum pgq_sqltriga(PG_FUNCTION_ARGS) { TriggerData *tg; PgqTriggerEvent ev; /* * Get the trigger call context */ if (!CALLED_AS_TRIGGER(fcinfo)) elog(ERROR, "pgq.sqltriga not called as trigger"); tg = (TriggerData *)(fcinfo->context); if (pgq_is_logging_disabled()) goto skip_it; /* * Connect to the SPI manager */ if (SPI_connect() < 0) elog(ERROR, "sqltriga: SPI_connect() failed"); pgq_prepare_event(&ev, tg, true); appendStringInfoChar(ev.field[EV_TYPE], ev.op_type); appendStringInfoString(ev.field[EV_EXTRA1], ev.info->table_name); /* * create sql and insert if interesting */ if (pgqtriga_make_sql(&ev, ev.field[EV_DATA])) pgq_insert_tg_event(&ev); if (SPI_finish() < 0) elog(ERROR, "SPI_finish failed"); /* * After trigger ignores result, * before trigger skips event if NULL. */ skip_it: if (TRIGGER_FIRED_AFTER(tg->tg_event) || ev.tgargs->skip) return PointerGetDatum(NULL); else if (TRIGGER_FIRED_BY_UPDATE(tg->tg_event)) return PointerGetDatum(tg->tg_newtuple); else return PointerGetDatum(tg->tg_trigtuple); } skytools-3.2.6/sql/pgq/triggers/logutriga.c0000644000000000000000000001400512426435645015667 0ustar /* * logutriga.c - Smart trigger that logs urlencoded changes. * * Copyright (c) 2007 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include #include #include #include "common.h" #include "stringutil.h" PG_FUNCTION_INFO_V1(pgq_logutriga); Datum pgq_logutriga(PG_FUNCTION_ARGS); /* need to ignore UPDATE where only ignored columns change */ static int is_interesting_change(PgqTriggerEvent *ev, TriggerData *tg) { HeapTuple old_row = tg->tg_trigtuple; HeapTuple new_row = tg->tg_newtuple; TupleDesc tupdesc = tg->tg_relation->rd_att; Datum old_value; Datum new_value; bool old_isnull; bool new_isnull; bool is_pk; int i, attkind_idx = -1; int ignore_count = 0; /* only UPDATE may need to be ignored */ if (!TRIGGER_FIRED_BY_UPDATE(tg->tg_event)) return 1; for (i = 0; i < tupdesc->natts; i++) { /* * Ignore dropped columns */ if (tupdesc->attrs[i]->attisdropped) continue; attkind_idx++; is_pk = pgqtriga_is_pkey(ev, i, attkind_idx); if (!is_pk && ev->tgargs->ignore_list == NULL) continue; old_value = SPI_getbinval(old_row, tupdesc, i + 1, &old_isnull); new_value = SPI_getbinval(new_row, tupdesc, i + 1, &new_isnull); /* * If old and new value are NULL, the column is unchanged */ if (old_isnull && new_isnull) continue; /* * If both are NOT NULL, we need to compare the values and skip * setting the column if equal */ if (!old_isnull && !new_isnull) { Oid opr_oid; FmgrInfo *opr_finfo_p; /* * Lookup the equal operators function call info using the * typecache if available */ TypeCacheEntry *type_cache; type_cache = lookup_type_cache(SPI_gettypeid(tupdesc, i + 1), TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO); opr_oid = type_cache->eq_opr; if (opr_oid == ARRAY_EQ_OP) opr_oid = InvalidOid; else opr_finfo_p = &(type_cache->eq_opr_finfo); /* * If we have an 
equal operator, use that to do binary * comparision. Else get the string representation of both * attributes and do string comparision. */ if (OidIsValid(opr_oid)) { if (DatumGetBool(FunctionCall2(opr_finfo_p, old_value, new_value))) continue; } else { char *old_strval = SPI_getvalue(old_row, tupdesc, i + 1); char *new_strval = SPI_getvalue(new_row, tupdesc, i + 1); if (strcmp(old_strval, new_strval) == 0) continue; } } if (is_pk) elog(ERROR, "primary key update not allowed"); if (pgqtriga_skip_col(ev, i, attkind_idx)) { /* this change should be ignored */ ignore_count++; continue; } /* a non-ignored column has changed */ return 1; } /* skip if only ignored column had changed */ if (ignore_count) return 0; /* do show NOP updates */ return 1; } void pgq_urlenc_row(PgqTriggerEvent *ev, HeapTuple row, StringInfo buf) { TriggerData *tg = ev->tgdata; TupleDesc tupdesc = tg->tg_relation->rd_att; bool first = true; int i; const char *col_ident, *col_value; int attkind_idx = -1; if (ev->op_type == 'R') return; for (i = 0; i < tg->tg_relation->rd_att->natts; i++) { /* Skip dropped columns */ if (tupdesc->attrs[i]->attisdropped) continue; attkind_idx++; if (pgqtriga_skip_col(ev, i, attkind_idx)) continue; if (first) first = false; else appendStringInfoChar(buf, '&'); /* quote column name */ col_ident = SPI_fname(tupdesc, i + 1); pgq_encode_cstring(buf, col_ident, TBUF_QUOTE_URLENC); /* quote column value */ col_value = SPI_getvalue(row, tupdesc, i + 1); if (col_value != NULL) { appendStringInfoChar(buf, '='); pgq_encode_cstring(buf, col_value, TBUF_QUOTE_URLENC); } } } /* * PgQ log trigger, takes 2 arguments: * 1. queue name to be inserted to. 
* * Queue events will be in format: * ev_type - operation type, I/U/D * ev_data - urlencoded column values * ev_extra1 - table name * ev_extra2 - optional urlencoded backup */ Datum pgq_logutriga(PG_FUNCTION_ARGS) { TriggerData *tg; struct PgqTriggerEvent ev; HeapTuple row; /* * Get the trigger call context */ if (!CALLED_AS_TRIGGER(fcinfo)) elog(ERROR, "pgq.logutriga not called as trigger"); tg = (TriggerData *)(fcinfo->context); if (TRIGGER_FIRED_BY_UPDATE(tg->tg_event)) row = tg->tg_newtuple; else row = tg->tg_trigtuple; if (pgq_is_logging_disabled()) goto skip_it; /* * Connect to the SPI manager */ if (SPI_connect() < 0) elog(ERROR, "logutriga: SPI_connect() failed"); pgq_prepare_event(&ev, tg, true); appendStringInfoString(ev.field[EV_EXTRA1], ev.info->table_name); appendStringInfoChar(ev.field[EV_TYPE], ev.op_type); if (ev.op_type != 'R') { appendStringInfoChar(ev.field[EV_TYPE], ':'); appendStringInfoString(ev.field[EV_TYPE], ev.pkey_list); } if (is_interesting_change(&ev, tg)) { /* * create type, data */ pgq_urlenc_row(&ev, row, ev.field[EV_DATA]); /* * Construct the parameter array and insert the log row. */ pgq_insert_tg_event(&ev); } if (SPI_finish() < 0) elog(ERROR, "SPI_finish failed"); /* * After trigger ignores result, * before trigger skips event if NULL. */ skip_it: if (TRIGGER_FIRED_AFTER(tg->tg_event) || ev.tgargs->skip) return PointerGetDatum(NULL); else return PointerGetDatum(row); } skytools-3.2.6/sql/pgq/triggers/makesql.c0000644000000000000000000002066512426435645015340 0ustar /* * makesql.c - generate partial SQL statement for row change. * * Copyright (c) 2007 Marko Kreen, Skype Technologies OÜ * * Based on Slony-I log trigger: * * Copyright (c) 2003-2006, PostgreSQL Global Development Group * Author: Jan Wieck, Afilias USA INC. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include #include #include #include "common.h" #include "stringutil.h" static void append_key_eq(StringInfo buf, const char *col_ident, const char *col_value) { if (col_value == NULL) elog(ERROR, "logtriga: Unexpected NULL key value"); pgq_encode_cstring(buf, col_ident, TBUF_QUOTE_IDENT); appendStringInfoChar(buf, '='); pgq_encode_cstring(buf, col_value, TBUF_QUOTE_LITERAL); } static void append_normal_eq(StringInfo buf, const char *col_ident, const char *col_value) { pgq_encode_cstring(buf, col_ident, TBUF_QUOTE_IDENT); appendStringInfoChar(buf, '='); if (col_value != NULL) pgq_encode_cstring(buf, col_value, TBUF_QUOTE_LITERAL); else appendStringInfoString(buf, "NULL"); } static void process_insert(PgqTriggerEvent *ev, StringInfo sql) { TriggerData *tg = ev->tgdata; HeapTuple new_row = tg->tg_trigtuple; TupleDesc tupdesc = tg->tg_relation->rd_att; int i; int need_comma = false; int attkind_idx; /* * Specify all the columns */ appendStringInfoChar(sql, '('); attkind_idx = -1; for (i = 0; i < tupdesc->natts; i++) { char *col_ident; /* Skip dropped columns */ if (tupdesc->attrs[i]->attisdropped) continue; /* Check if allowed by colstring */ attkind_idx++; if (pgqtriga_skip_col(ev, i, attkind_idx)) continue; if (need_comma) 
appendStringInfoChar(sql, ','); else need_comma = true; /* quote column name */ col_ident = SPI_fname(tupdesc, i + 1); pgq_encode_cstring(sql, col_ident, TBUF_QUOTE_IDENT); } /* * Append the string ") values (" */ appendStringInfoString(sql, ") values ("); /* * Append the values */ need_comma = false; attkind_idx = -1; for (i = 0; i < tupdesc->natts; i++) { char *col_value; /* Skip dropped columns */ if (tupdesc->attrs[i]->attisdropped) continue; /* Check if allowed by colstring */ attkind_idx++; if (pgqtriga_skip_col(ev, i, attkind_idx)) continue; if (need_comma) appendStringInfoChar(sql, ','); else need_comma = true; /* quote column value */ col_value = SPI_getvalue(new_row, tupdesc, i + 1); if (col_value == NULL) appendStringInfoString(sql, "null"); else pgq_encode_cstring(sql, col_value, TBUF_QUOTE_LITERAL); } /* * Terminate and done */ appendStringInfoChar(sql, ')'); } static int process_update(PgqTriggerEvent *ev, StringInfo sql) { TriggerData *tg = ev->tgdata; HeapTuple old_row = tg->tg_trigtuple; HeapTuple new_row = tg->tg_newtuple; TupleDesc tupdesc = tg->tg_relation->rd_att; Datum old_value; Datum new_value; bool old_isnull; bool new_isnull; char *col_ident; char *col_value; int i; int need_comma = false; int need_and = false; int attkind_idx; int ignore_count = 0; attkind_idx = -1; for (i = 0; i < tupdesc->natts; i++) { /* * Ignore dropped columns */ if (tupdesc->attrs[i]->attisdropped) continue; attkind_idx++; old_value = SPI_getbinval(old_row, tupdesc, i + 1, &old_isnull); new_value = SPI_getbinval(new_row, tupdesc, i + 1, &new_isnull); /* * If old and new value are NULL, the column is unchanged */ if (old_isnull && new_isnull) continue; /* * If both are NOT NULL, we need to compare the values and skip * setting the column if equal */ if (!old_isnull && !new_isnull) { Oid opr_oid; FmgrInfo *opr_finfo_p; /* * Lookup the equal operators function call info using the * typecache if available */ TypeCacheEntry *type_cache; type_cache = 
lookup_type_cache(SPI_gettypeid(tupdesc, i + 1), TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO); opr_oid = type_cache->eq_opr; if (opr_oid == ARRAY_EQ_OP) opr_oid = InvalidOid; else opr_finfo_p = &(type_cache->eq_opr_finfo); /* * If we have an equal operator, use that to do binary * comparision. Else get the string representation of both * attributes and do string comparision. */ if (OidIsValid(opr_oid)) { if (DatumGetBool(FunctionCall2(opr_finfo_p, old_value, new_value))) continue; } else { char *old_strval = SPI_getvalue(old_row, tupdesc, i + 1); char *new_strval = SPI_getvalue(new_row, tupdesc, i + 1); if (strcmp(old_strval, new_strval) == 0) continue; } } if (pgqtriga_is_pkey(ev, i, attkind_idx)) elog(ERROR, "primary key update not allowed"); if (pgqtriga_skip_col(ev, i, attkind_idx)) { /* this change should be ignored */ ignore_count++; continue; } if (need_comma) appendStringInfoChar(sql, ','); else need_comma = true; col_ident = SPI_fname(tupdesc, i + 1); col_value = SPI_getvalue(new_row, tupdesc, i + 1); append_normal_eq(sql, col_ident, col_value); } /* * It can happen that the only UPDATE an application does is to set a * column to the same value again. In that case, we'd end up here with * no columns in the SET clause yet. We add the first key column here * with it's old value to simulate the same for the replication * engine. 
*/ if (!need_comma) { /* there was change in ignored columns, skip whole event */ if (ignore_count > 0) return 0; for (i = 0, attkind_idx = -1; i < tupdesc->natts; i++) { if (tupdesc->attrs[i]->attisdropped) continue; attkind_idx++; if (pgqtriga_is_pkey(ev, i, attkind_idx)) break; } col_ident = SPI_fname(tupdesc, i + 1); col_value = SPI_getvalue(old_row, tupdesc, i + 1); append_key_eq(sql, col_ident, col_value); } appendStringInfoString(sql, " where "); for (i = 0, attkind_idx = -1; i < tupdesc->natts; i++) { /* * Ignore dropped columns */ if (tupdesc->attrs[i]->attisdropped) continue; attkind_idx++; if (!pgqtriga_is_pkey(ev, i, attkind_idx)) continue; col_ident = SPI_fname(tupdesc, i + 1); col_value = SPI_getvalue(old_row, tupdesc, i + 1); if (need_and) appendStringInfoString(sql, " and "); else need_and = true; append_key_eq(sql, col_ident, col_value); } return 1; } static void process_delete(PgqTriggerEvent *ev, StringInfo sql) { TriggerData *tg = ev->tgdata; HeapTuple old_row = tg->tg_trigtuple; TupleDesc tupdesc = tg->tg_relation->rd_att; char *col_ident; char *col_value; int i; int need_and = false; int attkind_idx; for (i = 0, attkind_idx = -1; i < tupdesc->natts; i++) { if (tupdesc->attrs[i]->attisdropped) continue; attkind_idx++; if (!pgqtriga_is_pkey(ev, i, attkind_idx)) continue; col_ident = SPI_fname(tupdesc, i + 1); col_value = SPI_getvalue(old_row, tupdesc, i + 1); if (need_and) appendStringInfoString(sql, " and "); else need_and = true; append_key_eq(sql, col_ident, col_value); } } int pgqtriga_make_sql(PgqTriggerEvent *ev, StringInfo sql) { TriggerData *tg = ev->tgdata; TupleDesc tupdesc; int i; int attcnt; int need_event = 1; tupdesc = tg->tg_relation->rd_att; /* * Count number of active columns */ for (i = 0, attcnt = 0; i < tupdesc->natts; i++) { if (tupdesc->attrs[i]->attisdropped) continue; attcnt++; } /* * Determine cmdtype and op_data depending on the command type */ if (TRIGGER_FIRED_BY_INSERT(tg->tg_event)) { process_insert(ev, sql); } else 
if (TRIGGER_FIRED_BY_UPDATE(tg->tg_event)) { need_event = process_update(ev, sql); } else if (TRIGGER_FIRED_BY_DELETE(tg->tg_event)) { process_delete(ev, sql); } else if (TRIGGER_FIRED_BY_TRUNCATE(tg->tg_event)) { /* nothing to do for truncate */ } else elog(ERROR, "logtriga fired for unhandled event"); return need_event; } skytools-3.2.6/sql/pgq/triggers/pgq_triggers.sql0000644000000000000000000001054412426435645016750 0ustar -- ---------------------------------------------------------------------- -- Function: pgq.sqltriga() -- -- Trigger that generates queue events containing partial SQL. -- It autodetects table structure. -- -- Purpose: -- Replication events, that only need changed column values. -- -- Parameters: -- arg1 - queue name -- argX - any number of optional arg, in any order -- -- Optinal arguments: -- SKIP - The actual operation should be skipped (BEFORE trigger) -- ignore=col1[,col2] - don't look at the specified arguments -- pkey=col1[,col2] - Set pkey fields for the table, PK autodetection will be skipped -- backup - Put urlencoded contents of old row to ev_extra2 -- colname=EXPR - Override field value with SQL expression. Can reference table -- columns. colname can be: ev_type, ev_data, ev_extra1 .. ev_extra4 -- when=EXPR - If EXPR returns false, don't insert event. -- -- Queue event fields: -- ev_type - I/U/D -- ev_data - partial SQL statement -- ev_extra1 - table name -- ev_extra2 - optional urlencoded backup -- -- ---------------------------------------------------------------------- CREATE OR REPLACE FUNCTION pgq.sqltriga() RETURNS trigger AS '$libdir/pgq_triggers', 'pgq_sqltriga' LANGUAGE C; -- ---------------------------------------------------------------------- -- Function: pgq.logutriga() -- -- Trigger function that puts row data in urlencoded form into queue. -- -- Purpose: -- Used as producer for several PgQ standard consumers (cube_dispatcher, -- queue_mover, table_dispatcher). 
Basically for cases where the -- consumer wants to parse the event and look at the actual column values. -- -- Trigger parameters: -- arg1 - queue name -- argX - any number of optional arg, in any order -- -- Optinal arguments: -- SKIP - The actual operation should be skipped (BEFORE trigger) -- ignore=col1[,col2] - don't look at the specified arguments -- pkey=col1[,col2] - Set pkey fields for the table, autodetection will be skipped -- backup - Put urlencoded contents of old row to ev_extra2 -- colname=EXPR - Override field value with SQL expression. Can reference table -- columns. colname can be: ev_type, ev_data, ev_extra1 .. ev_extra4 -- when=EXPR - If EXPR returns false, don't insert event. -- -- Queue event fields: -- ev_type - I/U/D ':' pkey_column_list -- ev_data - column values urlencoded -- ev_extra1 - table name -- ev_extra2 - optional urlencoded backup -- -- Regular listen trigger example: -- > CREATE TRIGGER triga_nimi AFTER INSERT OR UPDATE ON customer -- > FOR EACH ROW EXECUTE PROCEDURE pgq.logutriga('qname'); -- -- Redirect trigger example: -- > CREATE TRIGGER triga_nimi BEFORE INSERT OR UPDATE ON customer -- > FOR EACH ROW EXECUTE PROCEDURE pgq.logutriga('qname', 'SKIP'); -- ---------------------------------------------------------------------- CREATE OR REPLACE FUNCTION pgq.logutriga() RETURNS TRIGGER AS '$libdir/pgq_triggers', 'pgq_logutriga' LANGUAGE C; ---- disable obsolete trigger -- ---------------------------------------------------------------------- -- Function - pgq.logtriga() -- -- (Obsolete) Non-automatic SQL trigger. It puts row data in partial SQL form into -- queue. It does not auto-detect table structure, it needs to be passed -- as trigger arg. -- -- Purpose: -- Used by Londiste to generate replication events. The "partial SQL" -- format is more compact than the urlencoded format but cannot be -- parsed, only applied. Which is fine for Londiste. 
-- -- Parameters: -- arg1 - queue name -- arg2 - column type spec string where each column corresponds to one char (k/v/i). -- if spec string is shorter than column list, rest of columns default to 'i'. -- -- Column types: -- k - pkey column -- v - normal data column -- i - ignore column -- -- Queue event fields: -- ev_type - I/U/D -- ev_data - partial SQL statement -- ev_extra1 - table name -- -- ---------------------------------------------------------------------- -- CREATE OR REPLACE FUNCTION pgq.logtriga() RETURNS trigger -- AS '$libdir/pgq_triggers', 'pgq_logtriga' LANGUAGE C; skytools-3.2.6/sql/pgq/triggers/parsesql.c0000644000000000000000000001014612426435645015526 0ustar #ifndef TEST #include #else #include #include #endif #include "parsesql.h" /* * Small SQL tokenizer. For cases where flex/bison is overkill. * * To simplify futher processing, it merges words separated * with dots together. That also means it does not support * whitespace/comments before and after dot. * * Otherwise it's relatively compatible with main parser. 
* * Return value: * -1 - error * 0 - end of string * 1..255 - single char * >255 - token code */ int sql_tokenizer(const char *sql, int *len_p, bool stdstr) { const char *p = sql; int tok; *len_p = 0; if (!*p) { /* end */ return 0; } else if (isspace(*p) || (p[0] == '-' && p[1] == '-') || (p[0] == '/' && p[1] == '*')) { /* whitespace */ tok = T_SPACE; while (1) { if (p[0] == '-' && p[1] == '-') { /* line comment */ while (*p && *p != '\n') p++; } else if (p[0] == '/' && p[1] == '*') { /* c-comment, potentially nested */ int level = 1; p += 2; while (level) { if (p[0] == '*' && p[1] == '/') { level--; p += 2; } else if (p[0] == '/' && p[1] == '*') { level++; p += 2; } else if (!*p) { return -1; } else p++; } } else if (isspace(p[0])) { /* plain whitespace */ while (isspace(p[0])) p++; } else break; } } else if ((p[0] == '\'' && !stdstr) || ((p[0] == 'E' || p[0] == 'e') && p[1] == '\'')) { /* extended string */ tok = T_STRING; if (p[0] == '\'') p++; else p += 2; for (; *p; p++) { if (p[0] == '\'') { if (p[1] == '\'') p++; else break; } else if (p[0] == '\\') { if (!p[1]) return -1; p++; } } if (*p++ != '\'') return -1; } else if (p[0] == '\'' && stdstr) { /* standard string */ tok = T_STRING; for (p++; *p; p++) { if (p[0] == '\'') { if (p[1] == '\'') p++; else break; } } if (*p++ != '\'') return -1; } else if (isalpha(*p) || (*p == '_')) { /* plain/quoted words separated with '.' */ tok = T_WORD; while (1) { /* plain ident */ while (*p && (isalnum(*p) || *p == '_' || *p == '.')) p++; if (p[0] == '"') { /* quoted ident */ for (p++; *p; p++) { if (p[0] == '"') { if (p[1] == '"') p++; else break; } } if (*p++ != '"') return -1; } else if (p[0] == '.') { tok = T_FQIDENT; p++; } else { break; } } } else if (isdigit(p[0]) || (p[0] == '.' && isdigit(p[1]))) { /* number */ tok = T_NUMBER; while (*p) { if (isdigit(*p) || *p == '.') { p++; } else if ((*p == 'e' || *p == 'E')) { if (p[1] == '.' 
|| p[1] == '+' || p[1] == '-') { p += 2; } else if (isdigit(p[1])) { p += 2; } else break; } else break; } } else if (p[0] == '$') { if (isdigit(p[1])) { /* dollar ident */ tok = T_WORD; for (p += 2; *p; p++) { if (!isdigit(*p)) break; } } else if (isalpha(p[1]) || p[1] == '_' || p[1] == '$') { /* dollar quote */ const char *p2, *p3; tok = T_STRING; p2 = strchr(p+1, '$'); if (!p2) return -1; p3 = ++p2; while (1) { p3 = strchr(p3, '$'); if (!p3) return -1; if (strncmp(p3, p, p2 - p) == 0) break; p3++; } p = p3 + (p2 - p); } else return -1; } else if (*p == '.') { /* disallow standalone dot - seems ident parsing missed it */ return -1; } else { /* return other symbols as-is */ tok = *p++; } *len_p = p - sql; return tok; } #ifdef TEST /* * test code */ const char test_sql[] = "\r\n\t " "-- foo\n" "/*/**//* nested *//**/*/\n" "select 1, .600, $1, $150, 1.44e+.1," " bzo.\"fo'\"\".o\".zoo.fa, " "E'a\\\\ \\000 \\' baz '''," "'foo''baz' from \"quoted\"\"id\";" "$$$$ $_$ $x$ $ $_ $_$" ; int main(void) { const char *sql = test_sql; int tlen; int tok; bool stdstr = false; while (1) { tok = sql_tokenizer(sql, &tlen, stdstr); if (tok == 0) { printf("EOF\n"); break; } else if (tok < 0) { printf("ERR\n"); return 1; } printf("tok=%d len=%d str=<%.*s>\n", tok, tlen, tlen, sql); sql += tlen; } return 0; } #endif skytools-3.2.6/sql/pgq/triggers/logtriga.c0000644000000000000000000000426512426435645015511 0ustar /* * logtriga.c - Dumb SQL logging trigger. * * Copyright (c) 2007 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include "common.h" #include "stringutil.h" PG_FUNCTION_INFO_V1(pgq_logtriga); Datum pgq_logtriga(PG_FUNCTION_ARGS); /* * PGQ log trigger, takes 2 arguments: * 1. queue name to be inserted to. * 2. column type string * * Queue events will be in format: * ev_type - operation type, I/U/D * ev_data - partial SQL describing operation * ev_extra1 - table name */ Datum pgq_logtriga(PG_FUNCTION_ARGS) { TriggerData *tg; PgqTriggerEvent ev; /* * Get the trigger call context */ if (!CALLED_AS_TRIGGER(fcinfo)) elog(ERROR, "pgq.logtriga not called as trigger"); tg = (TriggerData *)(fcinfo->context); if (!TRIGGER_FIRED_AFTER(tg->tg_event)) elog(ERROR, "pgq.logtriga must be fired AFTER"); if (pgq_is_logging_disabled()) goto skip_it; /* * Connect to the SPI manager */ if (SPI_connect() < 0) elog(ERROR, "logtriga: SPI_connect() failed"); pgq_prepare_event(&ev, tg, false); appendStringInfoChar(ev.field[EV_TYPE], ev.op_type); appendStringInfoString(ev.field[EV_EXTRA1], ev.info->table_name); /* * create sql and insert if interesting */ if (pgqtriga_make_sql(&ev, ev.field[EV_DATA])) pgq_insert_tg_event(&ev); if (SPI_finish() < 0) elog(ERROR, "SPI_finish failed"); skip_it: return PointerGetDatum(NULL); } skytools-3.2.6/sql/pgq/triggers/Makefile0000644000000000000000000000045112426435645015166 0ustar MODULE_big = pgq_triggers SRCS = logtriga.c logutriga.c sqltriga.c \ common.c makesql.c stringutil.c \ parsesql.c qbuilder.c OBJS = $(SRCS:.c=.o) DATA = pgq_triggers.sql PG_CONFIG = pg_config PGXS = $(shell $(PG_CONFIG) --pgxs) include $(PGXS) cs: cscope -b -f .cscope.out *.c 
skytools-3.2.6/sql/pgq/triggers/stringutil.h0000644000000000000000000000046212426435645016105 0ustar enum PgqEncode { TBUF_QUOTE_IDENT, TBUF_QUOTE_LITERAL, TBUF_QUOTE_URLENC, }; StringInfo pgq_init_varbuf(void); Datum pgq_finish_varbuf(StringInfo buf); bool pgq_strlist_contains(const char *liststr, const char *str); void pgq_encode_cstring(StringInfo tbuf, const char *str, enum PgqEncode encoding); skytools-3.2.6/sql/pgq/triggers/parsesql.h0000644000000000000000000000024612426435645015533 0ustar /* multi-char tokens */ enum SqlToken { T_SPACE = 257, T_STRING, T_NUMBER, T_WORD, T_FQIDENT, }; int sql_tokenizer(const char *sql, int *len_p, bool stdstr); skytools-3.2.6/sql/pgq/triggers/common.c0000644000000000000000000004742112426435645015172 0ustar /* * common.c - functions used by all trigger variants. * * Copyright (c) 2007 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #if PG_VERSION_NUM >= 90300 #include #endif #include "common.h" #include "stringutil.h" #include "qbuilder.h" /* * Module tag */ #ifdef PG_MODULE_MAGIC PG_MODULE_MAGIC; #endif /* memcmp is ok on NameData fields */ #define is_magic_field(s) (memcmp(s, "_pgq_ev_", 8) == 0) static void make_query(struct PgqTriggerEvent *ev, int fld, const char *arg); static void override_fields(struct PgqTriggerEvent *ev); /* * primary key info */ static bool tbl_cache_invalid; static MemoryContext tbl_cache_ctx; static HTAB *tbl_cache_map; static const char pkey_sql[] = "SELECT k.attnum, k.attname FROM pg_index i, pg_attribute k" " WHERE i.indrelid = $1 AND k.attrelid = i.indexrelid" " AND i.indisprimary AND k.attnum > 0 AND NOT k.attisdropped" " ORDER BY k.attnum"; static void *pkey_plan; static void relcache_reset_cb(Datum arg, Oid relid); /* * helper for queue insertion. * * does not support NULL arguments. */ void pgq_simple_insert(const char *queue_name, Datum ev_type, Datum ev_data, Datum ev_extra1, Datum ev_extra2, Datum ev_extra3, Datum ev_extra4) { Datum values[7]; char nulls[7]; static void *plan = NULL; int res; if (!plan) { const char *sql; Oid types[7] = { TEXTOID, TEXTOID, TEXTOID, TEXTOID, TEXTOID, TEXTOID, TEXTOID }; sql = "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"; plan = SPI_saveplan(SPI_prepare(sql, 7, types)); if (plan == NULL) elog(ERROR, "logtriga: SPI_prepare() failed"); } values[0] = DirectFunctionCall1(textin, (Datum)queue_name); values[1] = ev_type; values[2] = ev_data; values[3] = ev_extra1; values[4] = ev_extra2; values[5] = ev_extra3; values[6] = ev_extra4; nulls[0] = ' '; nulls[1] = ev_type ? ' ' : 'n'; nulls[2] = ev_data ? ' ' : 'n'; nulls[3] = ev_extra1 ? ' ' : 'n'; nulls[4] = ev_extra2 ? ' ' : 'n'; nulls[5] = ev_extra3 ? ' ' : 'n'; nulls[6] = ev_extra4 ? 
' ' : 'n'; res = SPI_execute_plan(plan, values, nulls, false, 0); if (res != SPI_OK_SELECT) elog(ERROR, "call of pgq.insert_event failed"); } static void fill_magic_columns(PgqTriggerEvent *ev) { TriggerData *tg = ev->tgdata; int i; char *col_name, *col_value; StringInfo *dst = NULL; TupleDesc tupdesc = tg->tg_relation->rd_att; HeapTuple row; if (TRIGGER_FIRED_BY_UPDATE(tg->tg_event)) row = tg->tg_newtuple; else row = tg->tg_trigtuple; for (i = 0; i < tupdesc->natts; i++) { /* Skip dropped columns */ if (tupdesc->attrs[i]->attisdropped) continue; col_name = NameStr(tupdesc->attrs[i]->attname); if (!is_magic_field(col_name)) continue; if (strcmp(col_name, "_pgq_ev_type") == 0) dst = &ev->field[EV_TYPE]; else if (strcmp(col_name, "_pgq_ev_data") == 0) dst = &ev->field[EV_DATA]; else if (strcmp(col_name, "_pgq_ev_extra1") == 0) dst = &ev->field[EV_EXTRA1]; else if (strcmp(col_name, "_pgq_ev_extra2") == 0) dst = &ev->field[EV_EXTRA2]; else if (strcmp(col_name, "_pgq_ev_extra3") == 0) dst = &ev->field[EV_EXTRA3]; else if (strcmp(col_name, "_pgq_ev_extra4") == 0) dst = &ev->field[EV_EXTRA4]; else elog(ERROR, "Unknown magic column: %s", col_name); col_value = SPI_getvalue(row, tupdesc, i + 1); if (col_value != NULL) { *dst = pgq_init_varbuf(); appendStringInfoString(*dst, col_value); } else { *dst = NULL; } } } void pgq_insert_tg_event(PgqTriggerEvent *ev) { if (ev->tgargs->custom_fields) fill_magic_columns(ev); override_fields(ev); if (ev->skip_event) return; pgq_simple_insert(ev->queue_name, pgq_finish_varbuf(ev->field[EV_TYPE]), pgq_finish_varbuf(ev->field[EV_DATA]), pgq_finish_varbuf(ev->field[EV_EXTRA1]), pgq_finish_varbuf(ev->field[EV_EXTRA2]), pgq_finish_varbuf(ev->field[EV_EXTRA3]), pgq_finish_varbuf(ev->field[EV_EXTRA4])); } static char *find_table_name(Relation rel) { Oid nsoid = rel->rd_rel->relnamespace; char namebuf[NAMEDATALEN * 2 + 3]; HeapTuple ns_tup; Form_pg_namespace ns_struct; const char *tname = NameStr(rel->rd_rel->relname); const char *nspname; /* 
find namespace info */ ns_tup = SearchSysCache(NAMESPACEOID, ObjectIdGetDatum(nsoid), 0, 0, 0); if (!HeapTupleIsValid(ns_tup)) elog(ERROR, "Cannot find namespace %u", nsoid); ns_struct = (Form_pg_namespace) GETSTRUCT(ns_tup); nspname = NameStr(ns_struct->nspname); /* fill name */ snprintf(namebuf, sizeof(namebuf), "%s.%s", nspname, tname); ReleaseSysCache(ns_tup); return pstrdup(namebuf); } static void init_pkey_plan(void) { Oid types[1] = { OIDOID }; pkey_plan = SPI_saveplan(SPI_prepare(pkey_sql, 1, types)); if (pkey_plan == NULL) elog(ERROR, "pgq_triggers: SPI_prepare() failed"); } static void init_cache(void) { HASHCTL ctl; int flags; int max_tables = 128; /* * create own context */ tbl_cache_ctx = AllocSetContextCreate(TopMemoryContext, "pgq_triggers table info", ALLOCSET_SMALL_MINSIZE, ALLOCSET_SMALL_INITSIZE, ALLOCSET_SMALL_MAXSIZE); /* * init pkey cache. */ MemSet(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(Oid); ctl.entrysize = sizeof(struct PgqTableInfo); ctl.hash = oid_hash; flags = HASH_ELEM | HASH_FUNCTION; tbl_cache_map = hash_create("pgq_triggers pkey cache", max_tables, &ctl, flags); } /* * Prepare utility plans and plan cache. */ static void init_module(void) { static int callback_init = 0; /* do full reset if requested */ if (tbl_cache_invalid) { if (tbl_cache_map) hash_destroy(tbl_cache_map); if (tbl_cache_ctx) MemoryContextDelete(tbl_cache_ctx); tbl_cache_map = NULL; tbl_cache_ctx = NULL; tbl_cache_invalid = false; } /* re-initialize cache */ if (tbl_cache_ctx) return; init_cache(); /* * Rest is done only once. */ if (!pkey_plan) init_pkey_plan(); if (!callback_init) { CacheRegisterRelcacheCallback(relcache_reset_cb, (Datum)0); callback_init = 1; } } /* * Fill table information in hash table. 
*/ static void fill_tbl_info(Relation rel, struct PgqTableInfo *info) { StringInfo pkeys; Datum values[1]; const char *name = find_table_name(rel); TupleDesc desc; HeapTuple row; bool isnull; int res, i, attno; /* load pkeys */ values[0] = ObjectIdGetDatum(rel->rd_id); res = SPI_execute_plan(pkey_plan, values, NULL, false, 0); if (res != SPI_OK_SELECT) elog(ERROR, "pkey_plan exec failed"); /* * Fill info */ desc = SPI_tuptable->tupdesc; pkeys = makeStringInfo(); info->n_pkeys = SPI_processed; info->table_name = MemoryContextStrdup(tbl_cache_ctx, name); info->pkey_attno = MemoryContextAlloc(tbl_cache_ctx, info->n_pkeys * sizeof(int)); for (i = 0; i < SPI_processed; i++) { row = SPI_tuptable->vals[i]; attno = DatumGetInt16(SPI_getbinval(row, desc, 1, &isnull)); name = SPI_getvalue(row, desc, 2); info->pkey_attno[i] = attno; if (i > 0) appendStringInfoChar(pkeys, ','); appendStringInfoString(pkeys, name); } info->pkey_list = MemoryContextStrdup(tbl_cache_ctx, pkeys->data); info->tg_cache = NULL; } static void clean_info(struct PgqTableInfo *info, bool found) { struct PgqTriggerInfo *tg, *tmp = info->tg_cache; int i; if (!found) goto uninitialized; for (tg = info->tg_cache; tg; ) { tmp = tg->next; if (tg->ignore_list) pfree((void *)tg->ignore_list); if (tg->pkey_list) pfree((void *)tg->pkey_list); for (i = 0; i < EV_NFIELDS; i++) { if (tg->query[i]) qb_free(tg->query[i]); } pfree(tg); tg = tmp; } if (info->table_name) pfree(info->table_name); if (info->pkey_attno) pfree(info->pkey_attno); if (info->pkey_list) pfree((void *)info->pkey_list); uninitialized: info->tg_cache = NULL; info->table_name = NULL; info->pkey_attno = NULL; info->pkey_list = NULL; info->n_pkeys = 0; info->invalid = true; } /* * the callback can be launched any time from signal callback, * only minimal tagging can be done here. 
*/ static void relcache_reset_cb(Datum arg, Oid relid) { if (relid == InvalidOid) { tbl_cache_invalid = true; } else if (tbl_cache_map && !tbl_cache_invalid) { struct PgqTableInfo *entry; entry = hash_search(tbl_cache_map, &relid, HASH_FIND, NULL); if (entry) entry->invalid = true; } } /* * fetch table struct from cache. */ static struct PgqTableInfo *find_table_info(Relation rel) { struct PgqTableInfo *entry; bool found = false; init_module(); entry = hash_search(tbl_cache_map, &rel->rd_id, HASH_ENTER, &found); if (!found || entry->invalid) { clean_info(entry, found); /* * During fill_tbl_info() 2 events can happen: * - table info reset * - exception * To survive both, always clean struct and tag * as invalid but differently from reset. */ entry->invalid = 2; /* find info */ fill_tbl_info(rel, entry); /* * If no reset happened, it's valid. Actual reset * is postponed to next call. */ if (entry->invalid == 2) entry->invalid = false; } return entry; } static struct PgqTriggerInfo *find_trigger_info(struct PgqTableInfo *info, Oid tgoid, bool create) { struct PgqTriggerInfo *tgargs = info->tg_cache; for (tgargs = info->tg_cache; tgargs; tgargs = tgargs->next) { if (tgargs->tgoid == tgoid) return tgargs; } if (!create) return NULL; tgargs = MemoryContextAllocZero(tbl_cache_ctx, sizeof(*tgargs)); tgargs->tgoid = tgoid; tgargs->next = info->tg_cache; info->tg_cache = tgargs; return tgargs; } static void parse_newstyle_args(PgqTriggerEvent *ev, TriggerData *tg) { int i; /* * parse args */ for (i = 1; i < tg->tg_trigger->tgnargs; i++) { const char *arg = tg->tg_trigger->tgargs[i]; if (strcmp(arg, "SKIP") == 0) ev->tgargs->skip = true; else if (strncmp(arg, "ignore=", 7) == 0) ev->tgargs->ignore_list = MemoryContextStrdup(tbl_cache_ctx, arg + 7); else if (strncmp(arg, "pkey=", 5) == 0) ev->tgargs->pkey_list = MemoryContextStrdup(tbl_cache_ctx, arg + 5); else if (strcmp(arg, "backup") == 0) ev->tgargs->backup = true; else if (strcmp(arg, "deny") == 0) ev->tgargs->deny = 
true; else if (strncmp(arg, "ev_extra4=", 10) == 0) make_query(ev, EV_EXTRA4, arg + 10); else if (strncmp(arg, "ev_extra3=", 10) == 0) make_query(ev, EV_EXTRA3, arg + 10); else if (strncmp(arg, "ev_extra2=", 10) == 0) make_query(ev, EV_EXTRA2, arg + 10); else if (strncmp(arg, "ev_extra1=", 10) == 0) make_query(ev, EV_EXTRA1, arg + 10); else if (strncmp(arg, "ev_data=", 8) == 0) make_query(ev, EV_DATA, arg + 8); else if (strncmp(arg, "ev_type=", 8) == 0) make_query(ev, EV_TYPE, arg + 8); else if (strncmp(arg, "when=", 5) == 0) make_query(ev, EV_WHEN, arg + 5); else elog(ERROR, "bad param to pgq trigger"); } if (ev->op_type == 'R') { if (ev->tgargs->ignore_list) elog(ERROR, "Column ignore does not make sense for truncate trigger"); if (ev->tgargs->pkey_list) elog(ERROR, "Custom pkey_list does not make sense for truncate trigger"); if (ev->tgargs->backup) elog(ERROR, "Backup does not make sense for truncate trigger"); } } static void parse_oldstyle_args(PgqTriggerEvent *ev, TriggerData *tg) { const char *kpos; int attcnt, i; TupleDesc tupdesc = tg->tg_relation->rd_att; if (tg->tg_trigger->tgnargs < 2 || tg->tg_trigger->tgnargs > 3) elog(ERROR, "pgq.logtriga must be used with 2 or 3 args"); ev->attkind = tg->tg_trigger->tgargs[1]; ev->attkind_len = strlen(ev->attkind); if (tg->tg_trigger->tgnargs > 2) ev->table_name = tg->tg_trigger->tgargs[2]; /* * Count number of active columns */ tupdesc = tg->tg_relation->rd_att; for (i = 0, attcnt = 0; i < tupdesc->natts; i++) { if (!tupdesc->attrs[i]->attisdropped) attcnt++; } /* * look if last pkey column exists */ kpos = strrchr(ev->attkind, 'k'); if (kpos == NULL) elog(ERROR, "need at least one key column"); if (kpos - ev->attkind >= attcnt) elog(ERROR, "key column does not exist"); } /* * parse trigger arguments. 
*/ void pgq_prepare_event(struct PgqTriggerEvent *ev, TriggerData *tg, bool newstyle) { memset(ev, 0, sizeof(*ev)); /* * Check trigger calling conventions */ if (TRIGGER_FIRED_BY_TRUNCATE(tg->tg_event)) { if (!TRIGGER_FIRED_FOR_STATEMENT(tg->tg_event)) elog(ERROR, "pgq tRuncate trigger must be fired FOR EACH STATEMENT"); } else if (!TRIGGER_FIRED_FOR_ROW(tg->tg_event)) { elog(ERROR, "pgq Ins/Upd/Del trigger must be fired FOR EACH ROW"); } if (tg->tg_trigger->tgnargs < 1) elog(ERROR, "pgq trigger must have destination queue as argument"); /* * check operation type */ if (TRIGGER_FIRED_BY_INSERT(tg->tg_event)) ev->op_type = 'I'; else if (TRIGGER_FIRED_BY_UPDATE(tg->tg_event)) ev->op_type = 'U'; else if (TRIGGER_FIRED_BY_DELETE(tg->tg_event)) ev->op_type = 'D'; else if (TRIGGER_FIRED_BY_TRUNCATE(tg->tg_event)) ev->op_type = 'R'; else elog(ERROR, "unknown event for pgq trigger"); /* * load table info */ ev->tgdata = tg; ev->info = find_table_info(tg->tg_relation); ev->table_name = ev->info->table_name; ev->pkey_list = ev->info->pkey_list; ev->queue_name = tg->tg_trigger->tgargs[0]; /* * parse args, newstyle args are cached */ ev->tgargs = find_trigger_info(ev->info, tg->tg_trigger->tgoid, true); if (newstyle) { if (!ev->tgargs->finalized) parse_newstyle_args(ev, tg); if (ev->tgargs->pkey_list) ev->pkey_list = ev->tgargs->pkey_list; /* Check if we have pkey */ if (ev->op_type == 'U' || ev->op_type == 'D') { if (ev->pkey_list[0] == 0) elog(ERROR, "Update/Delete on table without pkey"); } } else { parse_oldstyle_args(ev, tg); } ev->tgargs->finalized = true; /* * Check if BEFORE/AFTER makes sense. */ if (ev->tgargs->skip) { if (TRIGGER_FIRED_AFTER(tg->tg_event)) elog(ERROR, "SKIP does not work in AFTER trigger."); } else { if (!TRIGGER_FIRED_AFTER(tg->tg_event)) /* dont care ??? 
*/ ; } if (ev->tgargs->deny) { elog(ERROR, "Table '%s' to queue '%s': change not allowed (%c)", ev->table_name, ev->queue_name, ev->op_type); } /* * init data */ ev->field[EV_TYPE] = pgq_init_varbuf(); ev->field[EV_DATA] = pgq_init_varbuf(); ev->field[EV_EXTRA1] = pgq_init_varbuf(); /* * Do the backup, if requested. */ if (ev->tgargs->backup) { ev->field[EV_EXTRA2] = pgq_init_varbuf(); pgq_urlenc_row(ev, tg->tg_trigtuple, ev->field[EV_EXTRA2]); } } /* * Check if column should be skipped */ bool pgqtriga_skip_col(PgqTriggerEvent *ev, int i, int attkind_idx) { TriggerData *tg = ev->tgdata; TupleDesc tupdesc; const char *name; tupdesc = tg->tg_relation->rd_att; if (tupdesc->attrs[i]->attisdropped) return true; name = NameStr(tupdesc->attrs[i]->attname); if (is_magic_field(name)) { ev->tgargs->custom_fields = 1; return true; } if (ev->attkind) { if (attkind_idx >= ev->attkind_len) return true; return ev->attkind[attkind_idx] == 'i'; } else if (ev->tgargs->ignore_list) { return pgq_strlist_contains(ev->tgargs->ignore_list, name); } return false; } /* * Check if column is pkey. */ bool pgqtriga_is_pkey(PgqTriggerEvent *ev, int i, int attkind_idx) { TriggerData *tg = ev->tgdata; TupleDesc tupdesc; const char *name; if (ev->attkind) { if (attkind_idx >= ev->attkind_len) return false; return ev->attkind[attkind_idx] == 'k'; } else if (ev->pkey_list) { tupdesc = tg->tg_relation->rd_att; if (tupdesc->attrs[i]->attisdropped) return false; name = NameStr(tupdesc->attrs[i]->attname); if (is_magic_field(name)) { ev->tgargs->custom_fields = 1; return false; } return pgq_strlist_contains(ev->pkey_list, name); } return false; } /* * Check if trigger action should be skipped. */ bool pgq_is_logging_disabled(void) { #if defined(PG_VERSION_NUM) && PG_VERSION_NUM >= 80300 /* * Force-disable the trigger in local replication role. In other * roles rely on the enabled/disabled status of the trigger. 
*/ if (SessionReplicationRole == SESSION_REPLICATION_ROLE_LOCAL) return true; #endif return false; } /* * Callbacks for queryfilter */ static int tg_name_lookup(void *arg, const char *name, int len) { TriggerData *tg = arg; TupleDesc desc = tg->tg_relation->rd_att; char namebuf[NAMEDATALEN + 1]; int nr; if (len >= sizeof(namebuf)) return -1; memcpy(namebuf, name, len); namebuf[len] = 0; nr = SPI_fnumber(desc, namebuf); if (nr > 0) return nr; return -1; } static Oid tg_type_lookup(void *arg, int spi_nr) { TriggerData *tg = arg; TupleDesc desc = tg->tg_relation->rd_att; return SPI_gettypeid(desc, spi_nr); } static Datum tg_value_lookup(void *arg, int spi_nr, bool *isnull) { TriggerData *tg = arg; TupleDesc desc = tg->tg_relation->rd_att; HeapTuple row; if (TRIGGER_FIRED_BY_UPDATE(tg->tg_event)) row = tg->tg_newtuple; else row = tg->tg_trigtuple; return SPI_getbinval(row, desc, spi_nr, isnull); } static const struct QueryBuilderOps tg_ops = { tg_name_lookup, tg_type_lookup, tg_value_lookup, }; /* * Custom override queries for field values. */ static void make_query(struct PgqTriggerEvent *ev, int fld, const char *arg) { struct TriggerData *tg = ev->tgdata; struct PgqTriggerInfo *tgargs; struct QueryBuilder *q; Oid tgoid = tg->tg_trigger->tgoid; const char *pfx = "select "; if (ev->op_type == 'R') elog(ERROR, "Custom expressions do not make sense for truncater trigger"); /* make sure tgargs exists */ if (!ev->tgargs) ev->tgargs = find_trigger_info(ev->info, tgoid, true); tgargs = ev->tgargs; if (tgargs->query[fld]) { /* seems we already have prepared query */ if (tgargs->query[fld]->plan) return; /* query is broken, last prepare failed? 
*/ qb_free(tgargs->query[fld]); tgargs->query[fld] = NULL; } /* allocate query in right context */ q = qb_create(&tg_ops, tbl_cache_ctx); /* attach immediately */ tgargs->query[fld] = q; /* prepare the query */ qb_add_raw(q, pfx, strlen(pfx)); qb_add_parse(q, arg, tg); qb_prepare(q, tg); } static void override_fields(struct PgqTriggerEvent *ev) { TriggerData *tg = ev->tgdata; int res, i; char *val; /* no overrides */ if (!ev->tgargs) return; for (i = 0; i < EV_NFIELDS; i++) { if (!ev->tgargs->query[i]) continue; res = qb_execute(ev->tgargs->query[i], tg); if (res != SPI_OK_SELECT) elog(ERROR, "Override query failed"); if (SPI_processed != 1) elog(ERROR, "Expect 1 row from override query, got %d", SPI_processed); /* special handling for EV_WHEN */ if (i == EV_WHEN) { bool isnull; Oid oid = SPI_gettypeid(SPI_tuptable->tupdesc, 1); Datum res; if (oid != BOOLOID) elog(ERROR, "when= query result must be boolean, got=%u", oid); res = SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull); if (isnull) elog(ERROR, "when= should not be NULL"); if (DatumGetBool(res) == 0) ev->skip_event = true; continue; } /* normal field */ val = SPI_getvalue(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1); if (ev->field[i]) { pfree(ev->field[i]->data); pfree(ev->field[i]); ev->field[i] = NULL; } if (val) { ev->field[i] = pgq_init_varbuf(); appendStringInfoString(ev->field[i], val); } } } skytools-3.2.6/sql/pgq/triggers/qbuilder.h0000644000000000000000000000175512426435645015516 0ustar #include /* * Callbacks that to argument name/type/value lookups. */ struct QueryBuilderOps { /* returns name index or < 0 if unknown. 
str is not null-terminated */ int (*name_lookup)(void *arg, const char *str, int len); /* returns type oid for nr that .name_lookup returned */ Oid (*type_lookup)(void *arg, int nr); /* returns value for nr that .name_lookup returned */ Datum (*value_lookup)(void *arg, int nr, bool *isnull); }; /* * Parsed query */ struct QueryBuilder { StringInfoData sql; bool stdstr; const struct QueryBuilderOps *op; void *plan; int nargs; int maxargs; int *arg_map; }; struct QueryBuilder *qb_create(const struct QueryBuilderOps *ops, MemoryContext ctx); void qb_add_raw(struct QueryBuilder *q, const char *str, int len); void qb_add_parse(struct QueryBuilder *q, const char *str, void *arg); void qb_free(struct QueryBuilder *q); void qb_prepare(struct QueryBuilder *q, void *arg); int qb_execute(struct QueryBuilder *q, void *arg); skytools-3.2.6/sql/pgq/triggers/common.h0000644000000000000000000000425512426435645015175 0ustar enum PgqFields { EV_TYPE = 0, EV_DATA, EV_EXTRA1, EV_EXTRA2, EV_EXTRA3, EV_EXTRA4, EV_WHEN, EV_NFIELDS }; /* * Per-event temporary data. */ struct PgqTriggerEvent { char op_type; /* overridable fields */ // fixme: check proper usage const char *table_name; const char *queue_name; const char *pkey_list; /* no cache for old-style args */ const char *attkind; int attkind_len; /* cached per-table info */ struct PgqTableInfo *info; /* cached per-trigger args */ struct PgqTriggerInfo *tgargs; /* current event data */ TriggerData *tgdata; /* result fields */ StringInfo field[EV_NFIELDS]; /* if 'when=' query fails */ bool skip_event; }; typedef struct PgqTriggerEvent PgqTriggerEvent; /* * Per trigger cached info, stored under table cache, * so that invalidate can drop it. */ struct PgqTriggerInfo { struct PgqTriggerInfo *next; Oid tgoid; bool finalized; bool skip; bool backup; bool custom_fields; bool deny; const char *ignore_list; const char *pkey_list; struct QueryBuilder *query[EV_NFIELDS]; }; /* * Per-table cached info. 
* * Per-trigger info should be cached under tg_cache. */ struct PgqTableInfo { Oid reloid; /* must be first, used by htab */ int n_pkeys; /* number of pkeys */ const char *pkey_list; /* pk column name list */ int *pkey_attno; /* pk column positions */ char *table_name; /* schema-quelified table name */ int invalid; /* set if the info was invalidated */ struct PgqTriggerInfo *tg_cache; }; /* common.c */ void pgq_prepare_event(struct PgqTriggerEvent *ev, TriggerData *tg, bool newstyle); void pgq_simple_insert(const char *queue_name, Datum ev_type, Datum ev_data, Datum ev_extra1, Datum ev_extra2, Datum ev_extra3, Datum ev_extra4); bool pgqtriga_skip_col(PgqTriggerEvent *ev, int i, int attkind_idx); bool pgqtriga_is_pkey(PgqTriggerEvent *ev, int i, int attkind_idx); void pgq_insert_tg_event(PgqTriggerEvent *ev); bool pgq_is_logging_disabled(void); /* makesql.c */ int pgqtriga_make_sql(PgqTriggerEvent *ev, StringInfo sql); /* logutriga.c */ void pgq_urlenc_row(PgqTriggerEvent *ev, HeapTuple row, StringInfo buf); #ifndef TRIGGER_FIRED_BY_TRUNCATE #define TRIGGER_FIRED_BY_TRUNCATE(tg) 0 #endif skytools-3.2.6/sql/pgq/docs/0000755000000000000000000000000012426435645012630 5ustar skytools-3.2.6/sql/pgq/docs/Topics.txt0000644000000000000000000000650512426435645014640 0ustar Format: 1.52 # This is the Natural Docs topics file for this project. If you change anything # here, it will apply to THIS PROJECT ONLY. If you'd like to change something # for all your projects, edit the Topics.txt in Natural Docs' Config directory # instead. # If you'd like to prevent keywords from being recognized by Natural Docs, you # can do it like this: # Ignore Keywords: [keyword], [keyword], ... # # Or you can use the list syntax like how they are defined: # Ignore Keywords: # [keyword] # [keyword], [plural keyword] # ... 
#------------------------------------------------------------------------------- # SYNTAX: # # Topic Type: [name] # Alter Topic Type: [name] # Creates a new topic type or alters one from the main file. Each type gets # its own index and behavior settings. Its name can have letters, numbers, # spaces, and these charaters: - / . ' # # Plural: [name] # Sets the plural name of the topic type, if different. # # Keywords: # [keyword] # [keyword], [plural keyword] # ... # Defines or adds to the list of keywords for the topic type. They may only # contain letters, numbers, and spaces and are not case sensitive. Plural # keywords are used for list topics. You can redefine keywords found in the # main topics file. # # Index: [yes|no] # Whether the topics get their own index. Defaults to yes. Everything is # included in the general index regardless of this setting. # # Scope: [normal|start|end|always global] # How the topics affects scope. Defaults to normal. # normal - Topics stay within the current scope. # start - Topics start a new scope for all the topics beneath it, # like class topics. # end - Topics reset the scope back to global for all the topics # beneath it. # always global - Topics are defined as global, but do not change the scope # for any other topics. # # Class Hierarchy: [yes|no] # Whether the topics are part of the class hierarchy. Defaults to no. # # Page Title If First: [yes|no] # Whether the topic's title becomes the page title if it's the first one in # a file. Defaults to no. # # Break Lists: [yes|no] # Whether list topics should be broken into individual topics in the output. # Defaults to no. # # Can Group With: [type], [type], ... # Defines a list of topic types that this one can possibly be grouped with. # Defaults to none. 
#------------------------------------------------------------------------------- # The following topics are defined in the main file, if you'd like to alter # their behavior or add keywords: # # Generic, Class, Interface, Section, File, Group, Function, Variable, # Property, Type, Constant, Enumeration, Event, Delegate, Macro, # Database, Database Table, Database View, Database Index, Database # Cursor, Database Trigger, Cookie, Build Target # If you add something that you think would be useful to other developers # and should be included in Natural Docs by default, please e-mail it to # topics [at] naturaldocs [dot] org. Topic Type: Schema Plural: Schemas Index: No Scope: Start Class Hierarchy: Yes Keywords: schema, schemas Alter Topic Type: Function Add Keywords: public function internal function Alter Topic Type: File Index: No skytools-3.2.6/sql/pgq/docs/Languages.txt0000644000000000000000000001202112426435645015273 0ustar Format: 1.52 # This is the Natural Docs languages file for this project. If you change # anything here, it will apply to THIS PROJECT ONLY. If you'd like to change # something for all your projects, edit the Languages.txt in Natural Docs' # Config directory instead. Ignore Extension: sql #------------------------------------------------------------------------------- # SYNTAX: # # Unlike other Natural Docs configuration files, in this file all comments # MUST be alone on a line. Some languages deal with the # character, so you # cannot put comments on the same line as content. # # Also, all lists are separated with spaces, not commas, again because some # languages may need to use them. # # Language: [name] # Alter Language: [name] # Defines a new language or alters an existing one. Its name can use any # characters. If any of the properties below have an add/replace form, you # must use that when using Alter Language. # # The language Shebang Script is special. 
It's entry is only used for # extensions, and files with those extensions have their shebang (#!) lines # read to determine the real language of the file. Extensionless files are # always treated this way. # # The language Text File is also special. It's treated as one big comment # so you can put Natural Docs content in them without special symbols. Also, # if you don't specify a package separator, ignored prefixes, or enum value # behavior, it will copy those settings from the language that is used most # in the source tree. # # Extensions: [extension] [extension] ... # [Add/Replace] Extensions: [extension] [extension] ... # Defines the file extensions of the language's source files. You can # redefine extensions found in the main languages file. You can use * to # mean any undefined extension. # # Shebang Strings: [string] [string] ... # [Add/Replace] Shebang Strings: [string] [string] ... # Defines a list of strings that can appear in the shebang (#!) line to # designate that it's part of the language. You can redefine strings found # in the main languages file. # # Ignore Prefixes in Index: [prefix] [prefix] ... # [Add/Replace] Ignored Prefixes in Index: [prefix] [prefix] ... # # Ignore [Topic Type] Prefixes in Index: [prefix] [prefix] ... # [Add/Replace] Ignored [Topic Type] Prefixes in Index: [prefix] [prefix] ... # Specifies prefixes that should be ignored when sorting symbols in an # index. Can be specified in general or for a specific topic type. # #------------------------------------------------------------------------------ # For basic language support only: # # Line Comments: [symbol] [symbol] ... # Defines a space-separated list of symbols that are used for line comments, # if any. # # Block Comments: [opening sym] [closing sym] [opening sym] [closing sym] ... # Defines a space-separated list of symbol pairs that are used for block # comments, if any. # # Package Separator: [symbol] # Defines the default package separator symbol. 
The default is a dot. # # [Topic Type] Prototype Enders: [symbol] [symbol] ... # When defined, Natural Docs will attempt to get a prototype from the code # immediately following the topic type. It stops when it reaches one of # these symbols. Use \n for line breaks. # # Line Extender: [symbol] # Defines the symbol that allows a prototype to span multiple lines if # normally a line break would end it. # # Enum Values: [global|under type|under parent] # Defines how enum values are referenced. The default is global. # global - Values are always global, referenced as 'value'. # under type - Values are under the enum type, referenced as # 'package.enum.value'. # under parent - Values are under the enum's parent, referenced as # 'package.value'. # # Perl Package: [perl package] # Specifies the Perl package used to fine-tune the language behavior in ways # too complex to do in this file. # #------------------------------------------------------------------------------ # For full language support only: # # Full Language Support: [perl package] # Specifies the Perl package that has the parsing routines necessary for full # language support. # #------------------------------------------------------------------------------- # The following languages are defined in the main file, if you'd like to alter # them: # # Text File, Shebang Script, C/C++, C#, Java, JavaScript, Perl, Python, # PHP, SQL, Visual Basic, Pascal, Assembly, Ada, Tcl, Ruby, Makefile, # ActionScript, ColdFusion, R, Fortran # If you add a language that you think would be useful to other developers # and should be included in Natural Docs by default, please e-mail it to # languages [at] naturaldocs [dot] org. 
Language: PLPGSQL Extension: sql Line Comment: -- Block Comment: /* */ Enum Values: Global Function Prototype Enders: , ; ) $ ' Variable Prototype Enders: , ; ) := default Default DEFAULT Database Index Prototype Enders: , ; ) Database Trigger Prototype Enders: begin Begin BEGIN skytools-3.2.6/sql/pgq/docs/Menu.txt0000644000000000000000000000373112426435645014301 0ustar Format: 1.52 Title: PgQ SubTitle: Database API # You can add a footer to your documentation like this: # Footer: [text] # If you want to add a copyright notice, this would be the place to do it. # You can add a timestamp to your documentation like one of these: # Timestamp: Generated on month day, year # Timestamp: Updated mm/dd/yyyy # Timestamp: Last updated mon day # # m - One or two digit month. January is "1" # mm - Always two digit month. January is "01" # mon - Short month word. January is "Jan" # month - Long month word. January is "January" # d - One or two digit day. 1 is "1" # dd - Always two digit day. 1 is "01" # day - Day with letter extension. 1 is "1st" # yy - Two digit year. 2006 is "06" # yyyy - Four digit year. 2006 is "2006" # year - Four digit year. 2006 is "2006" # -------------------------------------------------------------------------- # # Cut and paste the lines below to change the order in which your files # appear on the menu. Don't worry about adding or removing files, Natural # Docs will take care of that. # # You can further organize the menu by grouping the entries. Add a # "Group: [name] {" line to start a group, and add a "}" to end it. # # You can add text and web links to the menu by adding "Text: [text]" and # "Link: [name] ([URL])" lines, respectively. # # The formatting and comments are auto-generated, so don't worry about # neatness when editing the file. Natural Docs will clean it up the next # time it is run. When working with groups, just deal with the braces and # forget about the indentation and comments. 
# # -------------------------------------------------------------------------- File: Public Functions (external.sql) File: Public Triggers (triggers.sql) File: Internal Functions (internal.sql) File: Internal Tables (schema.sql) Group: Index { Index: Everything Database Table Index: Database Tables Function Index: Functions } # Group: Index skytools-3.2.6/sql/pgq/structure/0000755000000000000000000000000012426435645013740 5ustar skytools-3.2.6/sql/pgq/structure/triggers.sql0000644000000000000000000000017212426435645016307 0ustar -- Section: Public Triggers -- Group: Trigger Functions -- \i triggers/pgq.logutriga.sql \i triggers/pgq_triggers.sql skytools-3.2.6/sql/pgq/structure/ext_postproc.sql0000644000000000000000000000101312426435645017205 0ustar -- tag data objects as dumpable SELECT pg_catalog.pg_extension_config_dump('pgq.queue', ''); SELECT pg_catalog.pg_extension_config_dump('pgq.consumer', ''); SELECT pg_catalog.pg_extension_config_dump('pgq.tick', ''); SELECT pg_catalog.pg_extension_config_dump('pgq.subscription', ''); SELECT pg_catalog.pg_extension_config_dump('pgq.event_template', ''); SELECT pg_catalog.pg_extension_config_dump('pgq.retry_queue', ''); -- This needs pg_dump 9.1.7+ SELECT pg_catalog.pg_extension_config_dump('pgq.batch_id_seq', ''); skytools-3.2.6/sql/pgq/structure/uninstall_pgq.sql0000644000000000000000000000006412426435645017341 0ustar -- brute-force uninstall drop schema pgq cascade; skytools-3.2.6/sql/pgq/structure/tables.sql0000644000000000000000000002266212426435645015743 0ustar -- ---------------------------------------------------------------------- -- Section: Internal Tables -- -- Overview: -- pgq.queue - Queue configuration -- pgq.consumer - Consumer names -- pgq.subscription - Consumer registrations -- pgq.tick - Per-queue snapshots (ticks) -- pgq.event_* - Data tables -- pgq.retry_queue - Events to be retried later -- -- -- Standard triggers store events in the pgq.event_* data tables -- There is one top event table pgq.event_ 
for each queue -- inherited from pgq.event_template wuith three tables for actual data -- pgq.event__0 to pgq.event__2. -- -- The active table is rotated at interval, so that if all the consubers -- have passed some poin the oldes one can be emptied using TRUNCATE command -- for efficiency -- -- -- ---------------------------------------------------------------------- set client_min_messages = 'warning'; set default_with_oids = 'off'; -- drop schema if exists pgq cascade; create schema pgq; -- ---------------------------------------------------------------------- -- Table: pgq.consumer -- -- Name to id lookup for consumers -- -- Columns: -- co_id - consumer's id for internal usage -- co_name - consumer's id for external usage -- ---------------------------------------------------------------------- create table pgq.consumer ( co_id serial, co_name text not null, constraint consumer_pkey primary key (co_id), constraint consumer_name_uq UNIQUE (co_name) ); -- ---------------------------------------------------------------------- -- Table: pgq.queue -- -- Information about available queues -- -- Columns: -- queue_id - queue id for internal usage -- queue_name - queue name visible outside -- queue_ntables - how many data tables the queue has -- queue_cur_table - which data table is currently active -- queue_rotation_period - period for data table rotation -- queue_switch_step1 - tx when rotation happened -- queue_switch_step2 - tx after rotation was committed -- queue_switch_time - time when switch happened -- queue_external_ticker - ticks come from some external sources -- queue_ticker_paused - ticker is paused -- queue_disable_insert - disallow pgq.insert_event() -- queue_ticker_max_count - batch should not contain more events -- queue_ticker_max_lag - events should not age more -- queue_ticker_idle_period - how often to tick when no events happen -- queue_per_tx_limit - Max number of events single TX can insert -- queue_data_pfx - prefix for data table names -- 
queue_event_seq - sequence for event id's -- queue_tick_seq - sequence for tick id's -- ---------------------------------------------------------------------- create table pgq.queue ( queue_id serial, queue_name text not null, queue_ntables integer not null default 3, queue_cur_table integer not null default 0, queue_rotation_period interval not null default '2 hours', queue_switch_step1 bigint not null default txid_current(), queue_switch_step2 bigint default txid_current(), queue_switch_time timestamptz not null default now(), queue_external_ticker boolean not null default false, queue_disable_insert boolean not null default false, queue_ticker_paused boolean not null default false, queue_ticker_max_count integer not null default 500, queue_ticker_max_lag interval not null default '3 seconds', queue_ticker_idle_period interval not null default '1 minute', queue_per_tx_limit integer, queue_data_pfx text not null, queue_event_seq text not null, queue_tick_seq text not null, constraint queue_pkey primary key (queue_id), constraint queue_name_uq unique (queue_name) ); -- ---------------------------------------------------------------------- -- Table: pgq.tick -- -- Snapshots for event batching -- -- Columns: -- tick_queue - queue id whose tick it is -- tick_id - ticks id (per-queue) -- tick_time - time when tick happened -- tick_snapshot - transaction state -- tick_event_seq - last value for event seq -- ---------------------------------------------------------------------- create table pgq.tick ( tick_queue int4 not null, tick_id bigint not null, tick_time timestamptz not null default now(), tick_snapshot txid_snapshot not null default txid_current_snapshot(), tick_event_seq bigint not null, -- may be NULL on upgraded dbs constraint tick_pkey primary key (tick_queue, tick_id), constraint tick_queue_fkey foreign key (tick_queue) references pgq.queue (queue_id) ); -- ---------------------------------------------------------------------- -- Sequence: pgq.batch_id_seq 
-- -- Sequence for batch id's. -- ---------------------------------------------------------------------- create sequence pgq.batch_id_seq; -- ---------------------------------------------------------------------- -- Table: pgq.subscription -- -- Consumer registration on a queue. -- -- Columns: -- -- sub_id - subscription id for internal usage -- sub_queue - queue id -- sub_consumer - consumer's id -- sub_last_tick - last tick the consumer processed -- sub_batch - shortcut for queue_id/consumer_id/tick_id -- sub_next_tick - batch end pos -- ---------------------------------------------------------------------- create table pgq.subscription ( sub_id serial not null, sub_queue int4 not null, sub_consumer int4 not null, sub_last_tick bigint, sub_active timestamptz not null default now(), sub_batch bigint, sub_next_tick bigint, constraint subscription_pkey primary key (sub_queue, sub_consumer), constraint subscription_batch_idx unique (sub_batch), constraint sub_queue_fkey foreign key (sub_queue) references pgq.queue (queue_id), constraint sub_consumer_fkey foreign key (sub_consumer) references pgq.consumer (co_id) ); -- ---------------------------------------------------------------------- -- Table: pgq.event_template -- -- Parent table for all event tables -- -- Columns: -- ev_id - event's id, supposed to be unique per queue -- ev_time - when the event was inserted -- ev_txid - transaction id which inserted the event -- ev_owner - subscription id that wanted to retry this -- ev_retry - how many times the event has been retried, NULL for new events -- ev_type - consumer/producer can specify what the data fields contain -- ev_data - data field -- ev_extra1 - extra data field -- ev_extra2 - extra data field -- ev_extra3 - extra data field -- ev_extra4 - extra data field -- ---------------------------------------------------------------------- create table pgq.event_template ( ev_id bigint not null, ev_time timestamptz not null, ev_txid bigint not null default 
txid_current(), ev_owner int4, ev_retry int4, ev_type text, ev_data text, ev_extra1 text, ev_extra2 text, ev_extra3 text, ev_extra4 text ); -- ---------------------------------------------------------------------- -- Table: pgq.retry_queue -- -- Events to be retried. When retry time reaches, they will -- be put back into main queue. -- -- Columns: -- ev_retry_after - time when it should be re-inserted to main queue -- ev_queue - queue id, used to speed up event copy into queue -- * - same as pgq.event_template -- ---------------------------------------------------------------------- create table pgq.retry_queue ( ev_retry_after timestamptz not null, ev_queue int4 not null, like pgq.event_template, constraint rq_pkey primary key (ev_owner, ev_id), constraint rq_queue_id_fkey foreign key (ev_queue) references pgq.queue (queue_id) ); alter table pgq.retry_queue alter column ev_owner set not null; alter table pgq.retry_queue alter column ev_txid drop not null; create index rq_retry_idx on pgq.retry_queue (ev_retry_after); skytools-3.2.6/sql/pgq/structure/ext_unpackaged.sql0000644000000000000000000000055112426435645017444 0ustar ALTER EXTENSION pgq ADD SCHEMA pgq; ALTER EXTENSION pgq ADD TABLE pgq.queue; ALTER EXTENSION pgq ADD TABLE pgq.consumer; ALTER EXTENSION pgq ADD TABLE pgq.tick; ALTER EXTENSION pgq ADD TABLE pgq.subscription; ALTER EXTENSION pgq ADD TABLE pgq.event_template; ALTER EXTENSION pgq ADD TABLE pgq.retry_queue; ALTER EXTENSION pgq ADD SEQUENCE pgq.batch_id_seq; skytools-3.2.6/sql/pgq/structure/upgrade.sql0000644000000000000000000000012612426435645016107 0ustar \i structure/func_internal.sql \i structure/func_public.sql \i structure/triggers.sql skytools-3.2.6/sql/pgq/structure/grants.ini0000644000000000000000000000512112426435645015736 0ustar [GrantFu] roles = pgq_reader, pgq_writer, pgq_admin, public [1.public] on.functions = %(pgq_generic_fns)s public = execute [2.consumer] on.functions = %(pgq_read_fns)s pgq_reader = execute [3.producer] 
on.functions = %(pgq_write_fns)s pgq_writer = execute [4.admin] on.functions = %(pgq_system_fns)s pgq_admin = execute [5.meta.tables] on.tables = pgq.consumer, pgq.queue, pgq.tick, pgq.subscription pgq_admin = select, insert, update, delete pgq_reader = select public = select [5.event.tables] on.tables = pgq.event_template pgq_reader = select pgq_admin = select, insert, truncate # drop public access to events public = [6.retry.event] on.tables = pgq.retry_queue pgq_admin = select, insert, update, delete # # define various groups of functions # [DEFAULT] pgq_generic_fns = pgq.seq_getval(text), pgq.get_queue_info(), pgq.get_queue_info(text), pgq.get_consumer_info(), pgq.get_consumer_info(text), pgq.get_consumer_info(text, text), pgq.quote_fqname(text), pgq.version() pgq_read_fns = pgq.batch_event_sql(bigint), pgq.batch_event_tables(bigint), pgq.find_tick_helper(int4, int8, timestamptz, int8, int8, interval), pgq.register_consumer(text, text), pgq.register_consumer_at(text, text, bigint), pgq.unregister_consumer(text, text), pgq.next_batch_info(text, text), pgq.next_batch(text, text), pgq.next_batch_custom(text, text, interval, int4, interval), pgq.get_batch_events(bigint), pgq.get_batch_info(bigint), pgq.get_batch_cursor(bigint, text, int4, text), pgq.get_batch_cursor(bigint, text, int4), pgq.event_retry(bigint, bigint, timestamptz), pgq.event_retry(bigint, bigint, integer), pgq.batch_retry(bigint, integer), pgq.force_tick(text), pgq.finish_batch(bigint) pgq_write_fns = pgq.insert_event(text, text, text), pgq.insert_event(text, text, text, text, text, text, text), pgq.current_event_table(text), pgq.sqltriga(), pgq.logutriga() pgq_system_fns = pgq.ticker(text, bigint, timestamptz, bigint), pgq.ticker(text), pgq.ticker(), pgq.maint_retry_events(), pgq.maint_rotate_tables_step1(text), pgq.maint_rotate_tables_step2(), pgq.maint_tables_to_vacuum(), pgq.maint_operations(), pgq.upgrade_schema(), pgq.grant_perms(text), pgq._grant_perms_from(text,text,text,text), 
pgq.tune_storage(text), pgq.seq_setval(text, int8), pgq.create_queue(text), pgq.drop_queue(text, bool), pgq.drop_queue(text), pgq.set_queue_config(text, text, text), pgq.insert_event_raw(text, bigint, timestamptz, integer, integer, text, text, text, text, text, text), pgq.event_retry_raw(text, text, timestamptz, bigint, timestamptz, integer, text, text, text, text, text, text) skytools-3.2.6/sql/pgq/structure/grants.sql0000644000000000000000000000060412426435645015757 0ustar grant usage on schema pgq to public; -- old default grants grant select on table pgq.consumer to public; grant select on table pgq.queue to public; grant select on table pgq.tick to public; grant select on table pgq.queue to public; grant select on table pgq.subscription to public; grant select on table pgq.event_template to public; grant select on table pgq.retry_queue to public; skytools-3.2.6/sql/pgq/structure/func_internal.sql0000644000000000000000000000150512426435645017311 0ustar -- Section: Internal Functions -- install & launch schema upgrade \i functions/pgq.upgrade_schema.sql select pgq.upgrade_schema(); -- Group: Low-level event handling \i functions/pgq.batch_event_sql.sql \i functions/pgq.batch_event_tables.sql \i functions/pgq.event_retry_raw.sql \i functions/pgq.find_tick_helper.sql -- \i functions/pgq.insert_event_raw.sql \i lowlevel/pgq_lowlevel.sql -- Group: Ticker \i functions/pgq.ticker.sql -- Group: Periodic maintenence \i functions/pgq.maint_retry_events.sql \i functions/pgq.maint_rotate_tables.sql \i functions/pgq.maint_tables_to_vacuum.sql \i functions/pgq.maint_operations.sql -- Group: Random utility functions \i functions/pgq.grant_perms.sql \i functions/pgq.tune_storage.sql \i functions/pgq.force_tick.sql \i functions/pgq.seq_funcs.sql \i functions/pgq.quote_fqname.sql skytools-3.2.6/sql/pgq/structure/install.sql0000644000000000000000000000021012426435645016120 0ustar \i structure/tables.sql \i structure/func_internal.sql \i structure/func_public.sql \i 
structure/triggers.sql \i structure/grants.sql skytools-3.2.6/sql/pgq/structure/func_public.sql0000644000000000000000000000426312426435645016757 0ustar -- ---------------------------------------------------------------------- -- Section: Public Functions -- -- The queue is used by a client in the following steps -- -- 1. Register the client (a queue consumer) -- -- pgq.register_consumer(queue_name, consumer_id) -- -- 2. run a loop createing, consuming and closing batches -- -- 2a. pgq.get_batch_events(batch_id int8) - returns an int8 batch handle -- -- 2b. pgq.get_batch_events(batch_id int8) - returns a set of events for current batch -- -- the event structure is :(ev_id int8, ev_time timestamptz, ev_txid int8, ev_retry -- int4, ev_type text, ev_data text, ev_extra1, ev_extra2, ev_extra3, ev_extra4) -- -- 2c. if any of the events need to be tagged as failed, use a the function -- -- pgq.event_failed(batch_id int8, event_id int8, reason text) -- -- 2d. if you want the event to be re-inserted in the main queue afrer N seconds, use -- -- pgq.event_retry(batch_id int8, event_id int8, retry_seconds int4) -- -- 2e. To finish processing and release the batch, use -- -- pgq.finish_batch(batch_id int8) -- -- Until this is not done, the consumer will get same batch again. -- -- After calling finish_batch consumer cannot do any operations with events -- of that batch. All operations must be done before. 
-- -- -- ---------------------------------------------------------------------- -- Group: Queue creation \i functions/pgq.create_queue.sql \i functions/pgq.drop_queue.sql \i functions/pgq.set_queue_config.sql -- Group: Event publishing \i functions/pgq.insert_event.sql \i functions/pgq.current_event_table.sql -- Group: Subscribing to queue \i functions/pgq.register_consumer.sql \i functions/pgq.unregister_consumer.sql -- Group: Batch processing \i functions/pgq.next_batch.sql \i functions/pgq.get_batch_events.sql \i functions/pgq.get_batch_cursor.sql \i functions/pgq.event_retry.sql \i functions/pgq.batch_retry.sql \i functions/pgq.finish_batch.sql -- Group: General info functions \i functions/pgq.get_queue_info.sql \i functions/pgq.get_consumer_info.sql \i functions/pgq.version.sql \i functions/pgq.get_batch_info.sql skytools-3.2.6/sql/common-pgxs.mk0000644000000000000000000000710212426435645013711 0ustar # PGXS does not support modules that are supposed # to run on different Postgres versions very well. # Here are some workarounds for them. # Variables that are used when extensions are available Extension_data ?= Extension_data_built ?= $(EXTENSION)--$(EXT_VERSION).sql $(EXTENSION)--unpackaged--$(EXT_VERSION).sql Extension_regress ?= # Variables that are used when extensions are not available Contrib_data ?= Contrib_data_built = $(EXTENSION).sql $(EXTENSION).upgrade.sql \ structure/newgrants_$(EXTENSION).sql \ structure/oldgrants_$(EXTENSION).sql Contrib_regress ?= EXT_VERSION ?= EXT_OLD_VERSIONS ?= Extension_upgrade_files = $(if $(EXT_OLD_VERSIONS),$(foreach v,$(EXT_OLD_VERSIONS),$(EXTENSION)--$(v)--$(EXT_VERSION).sql)) Extension_data_built += $(Extension_upgrade_files) # Should the Contrib* files installed (under ../contrib/) # even when extensions are available? 
Contrib_install_always ?= yes # # switch variables # IfExt = $(if $(filter 8.% 9.0%,$(MAJORVERSION)8.3),$(2),$(1)) DATA = $(call IfExt,$(Extension_data),$(Contrib_data)) DATA_built = $(call IfExt,$(Extension_data_built),$(Contrib_data_built)) REGRESS = $(call IfExt,$(Extension_regress),$(Contrib_regress)) EXTRA_CLEAN += $(call IfExt,$(Contrib_data_built),$(Extension_data_built)) test.dump # have deterministic dbname for regtest database override CONTRIB_TESTDB = regression REGRESS_OPTS = --load-language=plpgsql --dbname=$(CONTRIB_TESTDB) # # Calculate actual sql files # SQLS = $(shell $(AWK) '/^\\i / { print $$2; }' structure/install.sql) FUNCS = $(shell $(AWK) '/^\\i / { print $$2; }' $(SQLS)) SRCS = $(SQLS) $(FUNCS) # # load PGXS # PG_CONFIG ?= pg_config PGXS = $(shell $(PG_CONFIG) --pgxs) include $(PGXS) # when compiling locally and with postgres without python, # the variable may be empty PYTHON := $(if $(PYTHON),$(PYTHON),python) # # common tools # NDOC = NaturalDocs NDOCARGS = -r -o html docs/html -p docs -i docs/sql CATSQL = $(PYTHON) ../../scripts/catsql.py GRANTFU = $(PYTHON) ../../scripts/grantfu.py # # build rules, in case Contrib data must be always installed # ifeq ($(call IfExt,$(Contrib_install_always),no),yes) all: $(Contrib_data) $(Contrib_data_built) installdirs: installdirs-old-contrib install: install-old-contrib installdirs-old-contrib: $(MKDIR_P) '$(DESTDIR)$(datadir)/contrib' install-old-contrib: $(Contrib_data) $(Contrib_data_built) installdirs-old-contrib $(INSTALL_DATA) $(addprefix $(srcdir)/, $(Contrib_data)) $(Contrib_data_built) '$(DESTDIR)$(datadir)/contrib/' endif # # regtest shortcuts # test: install $(MAKE) installcheck || { filterdiff --format=unified regression.diffs | less; exit 1; } pg_dump regression > test.dump ack: cp results/*.out expected/ cleandox: rm -rf docs/html docs/Data docs/sql clean: cleandox .PHONY: test ack installdirs-old-contrib install-old-contrib cleandox dox # # common files # 
$(EXTENSION)--$(EXT_VERSION).sql: $(EXTENSION).sql structure/ext_postproc.sql $(CATSQL) $^ > $@ $(EXTENSION)--unpackaged--$(EXT_VERSION).sql: $(EXTENSION).upgrade.sql structure/ext_unpackaged.sql structure/ext_postproc.sql $(CATSQL) $^ > $@ $(EXTENSION).sql: $(SRCS) $(CATSQL) structure/install.sql > $@ $(EXTENSION).upgrade.sql: $(SRCS) $(CATSQL) structure/upgrade.sql > $@ ifneq ($(Extension_upgrade_files),) $(Extension_upgrade_files): $(EXTENSION).upgrade.sql cp $< $@ endif structure/newgrants_$(EXTENSION).sql: structure/grants.ini $(GRANTFU) -t -r -d $< > $@ structure/oldgrants_$(EXTENSION).sql: structure/grants.ini structure/grants.sql echo "begin;" > $@ $(GRANTFU) -R -o $< >> $@ cat structure/grants.sql >> $@ echo "commit;" >> $@ skytools-3.2.6/sql/londiste/0000755000000000000000000000000012426435645012732 5ustar skytools-3.2.6/sql/londiste/londiste.control0000644000000000000000000000024312426435645016154 0ustar # Londiste extensions comment = 'Londiste Replication' default_version = '3.2.4' relocatable = false superuser = true schema = 'pg_catalog' requires = 'pgq_node' skytools-3.2.6/sql/londiste/Makefile0000644000000000000000000000112012426435645014364 0ustar EXTENSION = londiste EXT_VERSION = 3.2.4 EXT_OLD_VERSIONS = 3.1 3.1.1 3.1.3 3.1.4 3.1.6 3.2 3.2.3 base_regress = londiste_provider londiste_subscriber \ londiste_fkeys londiste_execute londiste_seqs londiste_merge \ londiste_leaf londiste_create_part Contrib_regress = init_noext $(base_regress) Extension_regress = init_ext $(base_regress) include ../common-pgxs.mk dox: cleandox mkdir -p docs/html mkdir -p docs/sql $(CATSQL) --ndoc structure/tables.sql > docs/sql/schema.sql $(CATSQL) --ndoc structure/functions.sql > docs/sql/functions.sql $(NDOC) $(NDOCARGS) skytools-3.2.6/sql/londiste/functions/0000755000000000000000000000000012426435645014742 5ustar skytools-3.2.6/sql/londiste/functions/londiste.local_set_table_attrs.sql0000644000000000000000000000200512426435645023631 0ustar create or replace function 
londiste.local_set_table_attrs( in i_queue_name text, in i_table_name text, in i_table_attrs text, out ret_code int4, out ret_note text) as $$ -- ---------------------------------------------------------------------- -- Function: londiste.local_set_table_attrs(3) -- -- Store urlencoded table attributes. -- -- Parameters: -- i_queue_name - cascaded queue name -- i_table - table name -- i_table_attrs - urlencoded attributes -- ---------------------------------------------------------------------- begin update londiste.table_info set table_attrs = i_table_attrs where queue_name = i_queue_name and table_name = i_table_name and local; if found then select 200, i_table_name || ': Table attributes stored' into ret_code, ret_note; else select 404, 'no such local table: ' || i_table_name into ret_code, ret_note; end if; return; end; $$ language plpgsql; skytools-3.2.6/sql/londiste/functions/londiste.local_remove_table.sql0000644000000000000000000000626412426435645023131 0ustar create or replace function londiste.local_remove_table( in i_queue_name text, in i_table_name text, out ret_code int4, out ret_note text) as $$ -- ---------------------------------------------------------------------- -- Function: londiste.local_remove_table(2) -- -- Remove table. 
-- -- Parameters: -- i_queue_name - set name -- i_table_name - table name -- -- Returns: -- 200 - OK -- 404 - Table not found -- ---------------------------------------------------------------------- declare fq_table_name text; qtbl text; seqname text; tbl record; tbl_oid oid; pgver integer; begin fq_table_name := londiste.make_fqname(i_table_name); qtbl := londiste.quote_fqname(fq_table_name); tbl_oid := londiste.find_table_oid(i_table_name); show server_version_num into pgver; select local, dropped_ddl, merge_state into tbl from londiste.table_info where queue_name = i_queue_name and table_name = fq_table_name for update; if not found then select 400, 'Table not found: ' || fq_table_name into ret_code, ret_note; return; end if; if tbl.local then perform londiste.drop_table_triggers(i_queue_name, fq_table_name); -- restore dropped ddl if tbl.dropped_ddl is not null then -- table is not synced, drop data to make restore faster if pgver >= 80400 then execute 'TRUNCATE ONLY ' || qtbl; else execute 'TRUNCATE ' || qtbl; end if; execute tbl.dropped_ddl; end if; -- reset data update londiste.table_info set local = false, custom_snapshot = null, table_attrs = null, dropped_ddl = null, merge_state = null, dest_table = null where queue_name = i_queue_name and table_name = fq_table_name; -- drop dependent sequence for seqname in select n.nspname || '.' 
|| s.relname from pg_catalog.pg_class s, pg_catalog.pg_namespace n, pg_catalog.pg_attribute a where a.attrelid = tbl_oid and a.atthasdef and a.atttypid::regtype::text in ('integer', 'bigint') and s.oid = pg_get_serial_sequence(qtbl, a.attname)::regclass::oid and n.oid = s.relnamespace loop perform londiste.local_remove_seq(i_queue_name, seqname); end loop; else if not pgq_node.is_root_node(i_queue_name) then select 400, 'Table not registered locally: ' || fq_table_name into ret_code, ret_note; return; end if; end if; if pgq_node.is_root_node(i_queue_name) then perform londiste.global_remove_table(i_queue_name, fq_table_name); perform londiste.root_notify_change(i_queue_name, 'londiste.remove-table', fq_table_name); end if; select 200, 'Table removed: ' || fq_table_name into ret_code, ret_note; return; end; $$ language plpgsql strict; skytools-3.2.6/sql/londiste/functions/londiste.make_fqname.sql0000644000000000000000000000125212426435645021547 0ustar create or replace function londiste.make_fqname(i_name text) returns text as $$ -- ---------------------------------------------------------------------- -- Function: londiste.make_fqname(1) -- -- Make name to schema-qualified one. -- -- First dot is taken as schema separator. -- -- If schema is missing, 'public' is assumed. -- -- Parameters: -- i_name - object name. -- -- Returns: -- Schema qualified name. -- ---------------------------------------------------------------------- begin if position('.' in i_name) > 0 then return i_name; else return 'public.' 
|| i_name; end if; end; $$ language plpgsql strict immutable; skytools-3.2.6/sql/londiste/functions/londiste.upgrade_schema.sql0000644000000000000000000000723012426435645022254 0ustar create or replace function londiste.upgrade_schema() returns int4 as $$ -- updates table structure if necessary declare pgversion int; cnt int4 = 0; begin show server_version_num into pgversion; -- table_info: check (dropped_ddl is null or merge_state in ('in-copy', 'catching-up')) perform 1 from information_schema.check_constraints where constraint_schema = 'londiste' and constraint_name = 'table_info_check' and position('in-copy' in check_clause) > 0 and position('catching' in check_clause) = 0; if found then alter table londiste.table_info drop constraint table_info_check; alter table londiste.table_info add constraint table_info_check check (dropped_ddl is null or merge_state in ('in-copy', 'catching-up')); cnt := cnt + 1; end if; -- table_info.dest_table perform 1 from information_schema.columns where table_schema = 'londiste' and table_name = 'table_info' and column_name = 'dest_table'; if not found then alter table londiste.table_info add column dest_table text; end if; -- table_info: change trigger timing if pgversion >= 90100 then perform 1 from information_schema.triggers where event_object_schema = 'londiste' and event_object_table = 'table_info' and trigger_name = 'table_info_trigger_sync' and action_timing = 'AFTER'; else perform 1 from information_schema.triggers where event_object_schema = 'londiste' and event_object_table = 'table_info' and trigger_name = 'table_info_trigger_sync' and condition_timing = 'AFTER'; end if; if found then drop trigger table_info_trigger_sync on londiste.table_info; create trigger table_info_trigger_sync before delete on londiste.table_info for each row execute procedure londiste.table_info_trigger(); end if; -- applied_execute.dest_table perform 1 from information_schema.columns where table_schema = 'londiste' and table_name = 
'applied_execute' and column_name = 'execute_attrs'; if not found then alter table londiste.applied_execute add column execute_attrs text; end if; -- applied_execute: drop queue_name from primary key perform 1 from pg_catalog.pg_indexes where schemaname = 'londiste' and tablename = 'applied_execute' and indexname = 'applied_execute_pkey' and indexdef like '%queue_name%'; if found then alter table londiste.applied_execute drop constraint applied_execute_pkey; alter table londiste.applied_execute add constraint applied_execute_pkey primary key (execute_file); end if; -- applied_execute: drop fkey to pgq_node perform 1 from information_schema.table_constraints where constraint_schema = 'londiste' and table_schema = 'londiste' and table_name = 'applied_execute' and constraint_type = 'FOREIGN KEY' and constraint_name = 'applied_execute_queue_name_fkey'; if found then alter table londiste.applied_execute drop constraint applied_execute_queue_name_fkey; end if; -- create roles perform 1 from pg_catalog.pg_roles where rolname = 'londiste_writer'; if not found then create role londiste_writer in role pgq_admin; cnt := cnt + 1; end if; perform 1 from pg_catalog.pg_roles where rolname = 'londiste_reader'; if not found then create role londiste_reader in role pgq_reader; cnt := cnt + 1; end if; return cnt; end; $$ language plpgsql; skytools-3.2.6/sql/londiste/functions/londiste.root_check_seqs.sql0000644000000000000000000000451012426435645022456 0ustar create or replace function londiste.root_check_seqs( in i_queue_name text, in i_buffer int8, out ret_code int4, out ret_note text) as $$ -- ---------------------------------------------------------------------- -- Function: londiste.root_check_seqs(1) -- -- Check sequences, and publish values if needed. 
-- -- Parameters: -- i_queue_name - set name -- i_buffer - safety room -- -- Returns: -- 200 - OK -- 402 - Not a root node -- 404 - Queue not found -- ---------------------------------------------------------------------- declare n record; seq record; real_value int8; pub_value int8; real_buffer int8; begin if i_buffer is null or i_buffer < 10 then real_buffer := 10000; else real_buffer := i_buffer; end if; select node_type, node_name into n from pgq_node.node_info where queue_name = i_queue_name for update; if not found then select 404, 'Queue not found: ' || i_queue_name into ret_code, ret_note; return; end if; if n.node_type <> 'root' then select 402, 'Not a root node' into ret_code, ret_note; return; end if; for seq in select seq_name, last_value, londiste.quote_fqname(seq_name) as fqname from londiste.seq_info where queue_name = i_queue_name and local order by nr loop execute 'select last_value from ' || seq.fqname into real_value; if real_value + real_buffer >= seq.last_value then pub_value := real_value + real_buffer * 3; perform pgq.insert_event(i_queue_name, 'londiste.update-seq', pub_value::text, seq.seq_name, null, null, null); update londiste.seq_info set last_value = pub_value where queue_name = i_queue_name and seq_name = seq.seq_name; end if; end loop; select 100, 'Sequences updated' into ret_code, ret_note; return; end; $$ language plpgsql; create or replace function londiste.root_check_seqs( in i_queue_name text, out ret_code int4, out ret_note text) as $$ begin select f.ret_code, f.ret_note into ret_code, ret_note from londiste.root_check_seqs(i_queue_name, 10000) f; return; end; $$ language plpgsql; skytools-3.2.6/sql/londiste/functions/londiste.execute_start.sql0000644000000000000000000000646012426435645022170 0ustar create or replace function londiste.execute_start( in i_queue_name text, in i_file_name text, in i_sql text, in i_expect_root boolean, in i_attrs text, out ret_code int4, out ret_note text) as $$ -- 
---------------------------------------------------------------------- -- Function: londiste.execute_start(5) -- -- Start execution of DDL. Should be called at the -- start of the transaction that does the SQL execution. -- -- Called-by: -- Londiste setup tool on root, replay on branches/leafs. -- -- Parameters: -- i_queue_name - cascaded queue name -- i_file_name - Unique ID for SQL -- i_sql - Actual script (informative, not used here) -- i_expect_root - Is this on root? Setup tool sets this to avoid -- execution on branches. -- i_attrs - urlencoded dict of extra attributes. -- The value will be put into ev_extra2 -- field of outgoing event. -- -- Returns: -- 200 - Proceed. -- 201 - Already applied -- 401 - Not root. -- 404 - No such queue -- ---------------------------------------------------------------------- declare is_root boolean; begin is_root := pgq_node.is_root_node(i_queue_name); if i_expect_root then if not is_root then select 401, 'Node is not root node: ' || i_queue_name into ret_code, ret_note; return; end if; end if; perform 1 from londiste.applied_execute where execute_file = i_file_name; if found then select 201, 'EXECUTE: "' || i_file_name || '" already applied, skipping' into ret_code, ret_note; return; end if; -- this also lock against potetial parallel execute insert into londiste.applied_execute (queue_name, execute_file, execute_sql, execute_attrs) values (i_queue_name, i_file_name, i_sql, i_attrs); select 200, 'Executing: ' || i_file_name into ret_code, ret_note; return; end; $$ language plpgsql; create or replace function londiste.execute_start( in i_queue_name text, in i_file_name text, in i_sql text, in i_expect_root boolean, out ret_code int4, out ret_note text) as $$ -- ---------------------------------------------------------------------- -- Function: londiste.execute_start(4) -- -- Start execution of DDL. Should be called at the -- start of the transaction that does the SQL execution. 
-- -- Called-by: -- Londiste setup tool on root, replay on branches/leafs. -- -- Parameters: -- i_queue_name - cascaded queue name -- i_file_name - Unique ID for SQL -- i_sql - Actual script (informative, not used here) -- i_expect_root - Is this on root? Setup tool sets this to avoid -- execution on branches. -- -- Returns: -- 200 - Proceed. -- 301 - Already applied -- 401 - Not root. -- 404 - No such queue -- ---------------------------------------------------------------------- begin select f.ret_code, f.ret_note from londiste.execute_start(i_queue_name, i_file_name, i_sql, i_expect_root, null) f into ret_code, ret_note; return; end; $$ language plpgsql; skytools-3.2.6/sql/londiste/functions/londiste.global_remove_table.sql0000644000000000000000000000236412426435645023274 0ustar create or replace function londiste.global_remove_table( in i_queue_name text, in i_table_name text, out ret_code int4, out ret_note text) as $$ -- ---------------------------------------------------------------------- -- Function: londiste.global_remove_table(2) -- -- Removes tables registration in set. -- -- Means that nodes cannot attach to this table anymore. 
-- -- Called by: -- - On root by londiste.local_remove_table() -- - Elsewhere by consumer receiving table remove event -- -- Returns: -- 200 - OK -- 400 - not found -- ---------------------------------------------------------------------- declare fq_table_name text; begin fq_table_name := londiste.make_fqname(i_table_name); if not pgq_node.is_root_node(i_queue_name) then perform londiste.local_remove_table(i_queue_name, fq_table_name); end if; delete from londiste.table_info where queue_name = i_queue_name and table_name = fq_table_name; if not found then select 400, 'Table not found: ' || fq_table_name into ret_code, ret_note; return; end if; select 200, 'Table removed: ' || i_table_name into ret_code, ret_note; return; end; $$ language plpgsql strict; skytools-3.2.6/sql/londiste/functions/londiste.is_obsolete_partition.sql0000644000000000000000000000340512426435645023705 0ustar create or replace function londiste.is_obsolete_partition ( in i_partition_table text, in i_retention_period interval, in i_partition_period text ) returns boolean as $$ ------------------------------------------------------------------------------- -- Function: londiste.is_obsolete_partition(3) -- -- Test partition name of partition-by-date parent table. -- -- Parameters: -- i_partition_table Partition table name we want to check -- i_retention_period How long to keep partitions around -- i_partition_period One of: year, month, day, hour -- -- Returns: -- True if partition is too old, false if it is not, -- null if its name does not match expected pattern. 
------------------------------------------------------------------------------- declare _expr text; _dfmt text; _base text; begin if i_partition_period in ('year', 'yearly') then _expr := '_[0-9]{4}'; _dfmt := '_YYYY'; elsif i_partition_period in ('month', 'monthly') then _expr := '_[0-9]{4}_[0-9]{2}'; _dfmt := '_YYYY_MM'; elsif i_partition_period in ('day', 'daily') then _expr := '_[0-9]{4}_[0-9]{2}_[0-9]{2}'; _dfmt := '_YYYY_MM_DD'; elsif i_partition_period in ('hour', 'hourly') then _expr := '_[0-9]{4}_[0-9]{2}_[0-9]{2}_[0-9]{2}'; _dfmt := '_YYYY_MM_DD_HH24'; else raise exception 'not supported i_partition_period: %', i_partition_period; end if; _expr = '^(.+)' || _expr || '$'; _base = substring (i_partition_table from _expr); if _base is null then return null; elsif i_partition_table < _base || to_char (now() - i_retention_period, _dfmt) then return true; else return false; end if; end; $$ language plpgsql; skytools-3.2.6/sql/londiste/functions/londiste.drop_obsolete_partitions.sql0000644000000000000000000000175612426435645024430 0ustar create or replace function londiste.drop_obsolete_partitions ( in i_parent_table text, in i_retention_period interval, in i_partition_period text ) returns setof text as $$ ------------------------------------------------------------------------------- -- Function: londiste.drop_obsolete_partitions(3) -- -- Drop obsolete partitions of partition-by-date parent table. 
-- -- Parameters: -- i_parent_table Master table from which partitions are inherited -- i_retention_period How long to keep partitions around -- i_partition_period One of: year, month, day, hour -- -- Returns: -- Names of partitions dropped ------------------------------------------------------------------------------- declare _part text; begin for _part in select londiste.list_obsolete_partitions (i_parent_table, i_retention_period, i_partition_period) loop execute 'drop table '|| _part; return next _part; end loop; end; $$ language plpgsql; skytools-3.2.6/sql/londiste/functions/londiste.table_info_trigger.sql0000644000000000000000000000107012426435645023126 0ustar create or replace function londiste.table_info_trigger() returns trigger as $$ -- ---------------------------------------------------------------------- -- Function: londiste.table_info_trigger(0) -- -- Trigger on londiste.table_info. Cleans triggers from tables -- when table is removed from londiste.table_info. -- ---------------------------------------------------------------------- begin if TG_OP = 'DELETE' then perform londiste.drop_table_triggers(OLD.queue_name, OLD.table_name); end if; return OLD; end; $$ language plpgsql; skytools-3.2.6/sql/londiste/functions/londiste.split_fqname.sql0000644000000000000000000000153012426435645021764 0ustar create or replace function londiste.split_fqname( in i_fqname text, out schema_part text, out name_part text) as $$ -- ---------------------------------------------------------------------- -- Function: londiste.split_fqname(1) -- -- Split fqname to schema and name parts. -- -- First dot is taken as schema separator. -- -- If schema is missing, 'public' is assumed. -- -- Parameters: -- i_fqname - object name. -- ---------------------------------------------------------------------- declare dot integer; begin dot = position('.' 
in i_fqname); if dot > 0 then schema_part = substring(i_fqname for dot - 1); name_part = substring(i_fqname from dot + 1); else schema_part = 'public'; name_part = i_fqname; end if; return; end; $$ language plpgsql strict immutable; skytools-3.2.6/sql/londiste/functions/londiste.global_update_seq.sql0000644000000000000000000000322012426435645022752 0ustar create or replace function londiste.global_update_seq( in i_queue_name text, in i_seq_name text, in i_value int8, out ret_code int4, out ret_note text) as $$ -- ---------------------------------------------------------------------- -- Function: londiste.global_update_seq(3) -- -- Update seq. -- -- Parameters: -- i_queue_name - set name -- i_seq_name - seq name -- i_value - new published value -- -- Returns: -- 200 - OK -- ---------------------------------------------------------------------- declare n record; fqname text; seq record; begin select node_type, node_name into n from pgq_node.node_info where queue_name = i_queue_name; if not found then select 404, 'Set not found: ' || i_queue_name into ret_code, ret_note; return; end if; if n.node_type = 'root' then select 402, 'Must not run on root node' into ret_code, ret_note; return; end if; fqname := londiste.make_fqname(i_seq_name); select last_value, local from londiste.seq_info into seq where queue_name = i_queue_name and seq_name = fqname for update; if not found then insert into londiste.seq_info (queue_name, seq_name, last_value) values (i_queue_name, fqname, i_value); else update londiste.seq_info set last_value = i_value where queue_name = i_queue_name and seq_name = fqname; if seq.local then perform pgq.seq_setval(fqname, i_value); end if; end if; select 200, 'Sequence updated' into ret_code, ret_note; return; end; $$ language plpgsql; skytools-3.2.6/sql/londiste/functions/londiste.handle_fkeys.sql0000644000000000000000000000745612426435645021753 0ustar create or replace function londiste.get_table_pending_fkeys(i_table_name text) returns setof 
londiste.pending_fkeys as $$ -- ---------------------------------------------------------------------- -- Function: londiste.get_table_pending_fkeys(1) -- -- Return dropped fkeys for table. -- -- Parameters: -- i_table_name - fqname -- -- Returns: -- desc -- ---------------------------------------------------------------------- declare fkeys record; begin for fkeys in select * from londiste.pending_fkeys where from_table = i_table_name or to_table = i_table_name order by 1,2,3 loop return next fkeys; end loop; return; end; $$ language plpgsql strict stable; create or replace function londiste.get_valid_pending_fkeys(i_queue_name text) returns setof londiste.pending_fkeys as $$ -- ---------------------------------------------------------------------- -- Function: londiste.get_valid_pending_fkeys(1) -- -- Returns dropped fkeys where both sides are in sync now. -- -- Parameters: -- i_queue_name - cascaded queue name -- -- Returns: -- desc -- ---------------------------------------------------------------------- declare fkeys record; begin for fkeys in select pf.* from londiste.pending_fkeys pf order by 1, 2, 3 loop perform 1 from londiste.table_info st_from where coalesce(st_from.dest_table, st_from.table_name) = fkeys.from_table and st_from.merge_state = 'ok' and st_from.custom_snapshot is null and st_from.queue_name = i_queue_name; if not found then continue; end if; perform 1 from londiste.table_info st_to where coalesce(st_to.dest_table, st_to.table_name) = fkeys.to_table and st_to.merge_state = 'ok' and st_to.custom_snapshot is null and st_to.queue_name = i_queue_name; if not found then continue; end if; return next fkeys; end loop; return; end; $$ language plpgsql strict stable; create or replace function londiste.drop_table_fkey(i_from_table text, i_fkey_name text) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: londiste.drop_table_fkey(2) -- -- Drop one fkey, save in pending table. 
-- ---------------------------------------------------------------------- declare fkey record; begin select * into fkey from londiste.find_table_fkeys(i_from_table) where fkey_name = i_fkey_name and from_table = i_from_table; if not found then return 0; end if; insert into londiste.pending_fkeys values (fkey.from_table, fkey.to_table, i_fkey_name, fkey.fkey_def); execute 'alter table only ' || londiste.quote_fqname(fkey.from_table) || ' drop constraint ' || quote_ident(i_fkey_name); return 1; end; $$ language plpgsql strict; create or replace function londiste.restore_table_fkey(i_from_table text, i_fkey_name text) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: londiste.restore_table_fkey(2) -- -- Restore dropped fkey. -- -- Parameters: -- i_from_table - source table -- i_fkey_name - fkey name -- -- Returns: -- nothing -- ---------------------------------------------------------------------- declare fkey record; begin select * into fkey from londiste.pending_fkeys where fkey_name = i_fkey_name and from_table = i_from_table; if not found then return 0; end if; execute fkey.fkey_def; delete from londiste.pending_fkeys where fkey_name = fkey.fkey_name; return 1; end; $$ language plpgsql strict; skytools-3.2.6/sql/londiste/functions/londiste.periodic_maintenance.sql0000644000000000000000000000077612426435645023455 0ustar create or replace function londiste.periodic_maintenance() returns integer as $$ -- ---------------------------------------------------------------------- -- Function: londiste.periodic_maintenance(0) -- -- Clean random stuff. 
-- ---------------------------------------------------------------------- begin -- clean old EXECUTE entries delete from londiste.applied_execute where execute_time < now() - '3 months'::interval; return 0; end; $$ language plpgsql; -- need admin access skytools-3.2.6/sql/londiste/functions/londiste.version.sql0000644000000000000000000000066512426435645020777 0ustar create or replace function londiste.version() returns text as $$ -- ---------------------------------------------------------------------- -- Function: londiste.version(0) -- -- Returns version string for londiste. ATM it is based on SkyTools -- version and only bumped when database code changes. -- ---------------------------------------------------------------------- begin return '3.2.4'; end; $$ language plpgsql; skytools-3.2.6/sql/londiste/functions/londiste.local_remove_seq.sql0000644000000000000000000000230012426435645022615 0ustar create or replace function londiste.local_remove_seq( in i_queue_name text, in i_seq_name text, out ret_code int4, out ret_note text) as $$ -- ---------------------------------------------------------------------- -- Function: londiste.local_remove_seq(2) -- -- Remove sequence. 
-- -- Parameters: -- i_queue_name - set name -- i_seq_name - sequence name -- -- Returns: -- 200 - OK -- 404 - Sequence not found -- ---------------------------------------------------------------------- declare fqname text; begin fqname := londiste.make_fqname(i_seq_name); if pgq_node.is_root_node(i_queue_name) then select f.ret_code, f.ret_note into ret_code, ret_note from londiste.global_remove_seq(i_queue_name, fqname) f; return; end if; update londiste.seq_info set local = false where queue_name = i_queue_name and seq_name = fqname and local; if not found then select 404, 'Sequence not found: '||fqname into ret_code, ret_note; return; end if; select 200, 'Sequence removed: '||fqname into ret_code, ret_note; return; end; $$ language plpgsql strict; skytools-3.2.6/sql/londiste/functions/londiste.find_table_fkeys.sql0000644000000000000000000000271312426435645022576 0ustar create or replace function londiste.find_table_fkeys(i_table_name text) returns setof londiste.pending_fkeys as $$ -- ---------------------------------------------------------------------- -- Function: londiste.find_table_fkeys(1) -- -- Return all active fkeys. -- -- Parameters: -- i_table_name - fqname -- -- Returns: -- from_table - fqname -- to_table - fqname -- fkey_name - name -- fkey_def - full def -- ---------------------------------------------------------------------- declare fkey record; tbl_oid oid; begin select londiste.find_table_oid(i_table_name) into tbl_oid; for fkey in select n1.nspname || '.' || t1.relname as from_table, n2.nspname || '.' || t2.relname as to_table, conname::text as fkey_name, 'alter table only ' || quote_ident(n1.nspname) || '.' 
|| quote_ident(t1.relname) || ' add constraint ' || quote_ident(conname::text) || ' ' || pg_get_constraintdef(c.oid) as fkey_def from pg_constraint c, pg_namespace n1, pg_class t1, pg_namespace n2, pg_class t2 where c.contype = 'f' and (c.conrelid = tbl_oid or c.confrelid = tbl_oid) and t1.oid = c.conrelid and n1.oid = t1.relnamespace and t2.oid = c.confrelid and n2.oid = t2.relnamespace order by 1,2,3 loop return next fkey; end loop; return; end; $$ language plpgsql strict stable; skytools-3.2.6/sql/londiste/functions/londiste.global_remove_seq.sql0000644000000000000000000000220312426435645022765 0ustar create or replace function londiste.global_remove_seq( in i_queue_name text, in i_seq_name text, out ret_code int4, out ret_note text) as $$ -- ---------------------------------------------------------------------- -- Function: londiste.global_remove_seq(2) -- -- Removes sequence registration in set. -- -- Called by: -- - On root by londiste.local_remove_seq() -- - Elsewhere by consumer receiving seq remove event -- -- Returns: -- 200 - OK -- 400 - not found -- ---------------------------------------------------------------------- declare fq_name text; begin fq_name := londiste.make_fqname(i_seq_name); delete from londiste.seq_info where queue_name = i_queue_name and seq_name = fq_name; if not found then select 400, 'Sequence not found: '||fq_name into ret_code, ret_note; return; end if; if pgq_node.is_root_node(i_queue_name) then perform londiste.root_notify_change(i_queue_name, 'londiste.remove-seq', fq_name); end if; select 200, 'Sequence removed: '||fq_name into ret_code, ret_note; return; end; $$ language plpgsql strict; skytools-3.2.6/sql/londiste/functions/londiste.local_add_seq.sql0000644000000000000000000000471712426435645022066 0ustar create or replace function londiste.local_add_seq( in i_queue_name text, in i_seq_name text, out ret_code int4, out ret_note text) as $$ -- ---------------------------------------------------------------------- -- Function: 
londiste.local_add_seq(2) -- -- Register sequence. -- -- Parameters: -- i_queue_name - cascaded queue name -- i_seq_name - seq name -- -- Returns: -- 200 - OK -- 400 - Not found -- ---------------------------------------------------------------------- declare fq_seq_name text; lastval int8; seq record; begin fq_seq_name := londiste.make_fqname(i_seq_name); perform 1 from pg_class where oid = londiste.find_seq_oid(fq_seq_name); if not found then select 400, 'Sequence not found: ' || fq_seq_name into ret_code, ret_note; return; end if; if pgq_node.is_root_node(i_queue_name) then select local, last_value into seq from londiste.seq_info where queue_name = i_queue_name and seq_name = fq_seq_name for update; if found and seq.local then select 201, 'Sequence already added: ' || fq_seq_name into ret_code, ret_note; return; end if; if not seq.local then update londiste.seq_info set local = true where queue_name = i_queue_name and seq_name = fq_seq_name; else insert into londiste.seq_info (queue_name, seq_name, local, last_value) values (i_queue_name, fq_seq_name, true, 0); end if; perform * from londiste.root_check_seqs(i_queue_name); else select local, last_value into seq from londiste.seq_info where queue_name = i_queue_name and seq_name = fq_seq_name for update; if not found then select 404, 'Unknown sequence: ' || fq_seq_name into ret_code, ret_note; return; end if; if seq.local then select 201, 'Sequence already added: ' || fq_seq_name into ret_code, ret_note; return; end if; update londiste.seq_info set local = true where queue_name = i_queue_name and seq_name = fq_seq_name; perform pgq.seq_setval(fq_seq_name, seq.last_value); end if; select 200, 'Sequence added: ' || fq_seq_name into ret_code, ret_note; return; end; $$ language plpgsql; skytools-3.2.6/sql/londiste/functions/londiste.local_set_table_state.sql0000644000000000000000000000232212426435645023616 0ustar create or replace function londiste.local_set_table_state( in i_queue_name text, in i_table_name text, in 
i_snapshot text, in i_merge_state text, out ret_code int4, out ret_note text) as $$ -- ---------------------------------------------------------------------- -- Function: londiste.local_set_table_state(4) -- -- Change table state. -- -- Parameters: -- i_queue_name - cascaded queue name -- i_table - table name -- i_snapshot - optional remote snapshot info -- i_merge_state - merge state -- ---------------------------------------------------------------------- declare _tbl text; begin _tbl = londiste.make_fqname(i_table_name); update londiste.table_info set custom_snapshot = i_snapshot, merge_state = i_merge_state where queue_name = i_queue_name and table_name = _tbl and local; if not found then select 404, 'No such table: ' || _tbl into ret_code, ret_note; return; end if; select 200, 'Table ' || _tbl || ' state set to ' || coalesce(quote_literal(i_merge_state), 'NULL') into ret_code, ret_note; return; end; $$ language plpgsql; skytools-3.2.6/sql/londiste/functions/londiste.is_replica_func.sql0000644000000000000000000000115112426435645022426 0ustar create or replace function londiste.is_replica_func(func_oid oid) returns boolean as $$ -- ---------------------------------------------------------------------- -- Function: londiste.is_replica_func(1) -- -- Returns true if function is a PgQ-based replication functions. -- This also means it takes queue name as first argument. 
-- ---------------------------------------------------------------------- select count(1) > 0 from pg_proc f join pg_namespace n on (n.oid = f.pronamespace) where f.oid = $1 and n.nspname = 'pgq' and f.proname in ('sqltriga', 'logutriga'); $$ language sql strict stable; skytools-3.2.6/sql/londiste/functions/londiste.local_show_missing.sql0000644000000000000000000000345612426435645023176 0ustar create or replace function londiste.local_show_missing( in i_queue_name text, out obj_kind text, out obj_name text) returns setof record as $$ -- ---------------------------------------------------------------------- -- Function: londiste.local_show_missing(1) -- -- Return info about missing tables. On root show tables -- not registered on set, on branch/leaf show tables -- in set but not registered locally. -- ---------------------------------------------------------------------- begin if pgq_node.is_root_node(i_queue_name) then for obj_kind, obj_name in select r.relkind, n.nspname || '.' || r.relname from pg_catalog.pg_class r, pg_catalog.pg_namespace n where n.oid = r.relnamespace and r.relkind in ('r', 'S') and n.nspname not in ('pgq', 'pgq_ext', 'pgq_node', 'londiste', 'pg_catalog', 'information_schema') and n.nspname !~ '^pg_(toast|temp)' and not exists (select 1 from londiste.table_info where queue_name = i_queue_name and local and coalesce(dest_table, table_name) = (n.nspname || '.' 
|| r.relname)) order by 1, 2 loop return next; end loop; else for obj_kind, obj_name in select 'S', s.seq_name from londiste.seq_info s where s.queue_name = i_queue_name and not s.local union all select 'r', t.table_name from londiste.table_info t where t.queue_name = i_queue_name and not t.local order by 1, 2 loop return next; end loop; end if; return; end; $$ language plpgsql strict stable; skytools-3.2.6/sql/londiste/functions/londiste.create_trigger.sql0000644000000000000000000002130212426435645022267 0ustar create or replace function londiste.create_trigger( in i_queue_name text, in i_table_name text, in i_trg_args text[], in i_dest_table text, in i_node_type text, out ret_code int4, out ret_note text, out trigger_name text) as $$ ------------------------------------------------------------------------ -- Function: londiste.create_trigger(5) -- -- Create or replace londiste trigger(s) -- -- Parameters: -- i_queue_name - queue name -- i_table_name - table name -- i_trg_args - args to trigger -- i_dest_table - actual name of destination table (NULL if same as src) -- i_node_type - l3 node type -- -- Trigger args: -- See documentation for pgq triggers. 
-- -- Trigger creation flags (default: AIUDL): -- I - ON INSERT -- U - ON UPDATE -- D - ON DELETE -- Q - use pgq.sqltriga() as trigger function -- L - use pgq.logutriga() as trigger function -- B - BEFORE -- A - AFTER -- S - SKIP -- -- Returns: -- 200 - Ok -- 201 - Trigger not created -- 405 - Multiple SKIP triggers -- ------------------------------------------------------------------------ declare trigger_name text; lg_func text; lg_pos text; lg_event text; lg_args text[]; _old_tgargs bytea; _new_tgargs bytea; trunctrg_name text; pgversion int; sql text; arg text; i integer; _extra_args text[] := '{}'; -- skip trigger _skip_prefix text := 'zzz_'; _skip_trg_count integer; _skip_trg_name text; -- given tgflags array _tgflags char[]; -- ordinary argument array _args text[]; -- array with all valid tgflags values _valid_flags char[] := array['B','A','Q','L','I','U','D','S']; -- argument flags _skip boolean := false; _no_triggers boolean := false; _got_extra1 boolean := false; begin -- parse trigger args if array_lower(i_trg_args, 1) is not null then for i in array_lower(i_trg_args, 1) .. array_upper(i_trg_args, 1) loop arg := i_trg_args[i]; if arg like 'tgflags=%' then -- special flag handling arg := upper(substr(arg, 9)); for j in array_lower(_valid_flags, 1) .. 
array_upper(_valid_flags, 1) loop if position(_valid_flags[j] in arg) > 0 then _tgflags := array_append(_tgflags, _valid_flags[j]); end if; end loop; elsif arg = 'no_triggers' then _no_triggers := true; elsif lower(arg) = 'skip' then _skip := true; elsif arg = 'virtual_table' then _no_triggers := true; -- do not create triggers elsif arg not in ('expect_sync', 'skip_truncate', 'merge_all', 'no_merge') then -- ignore add-table args if arg like 'ev_extra1=%' then _got_extra1 := true; end if; -- ordinary arg _args = array_append(_args, quote_literal(arg)); end if; end loop; end if; if i_dest_table <> i_table_name and not _got_extra1 then -- if renamed table, enforce trigger to put -- global table name into extra1 arg := 'ev_extra1=' || quote_literal(i_table_name); _args := array_append(_args, quote_literal(arg)); end if; trigger_name := '_londiste_' || i_queue_name; lg_func := 'pgq.logutriga'; lg_event := ''; lg_args := array[quote_literal(i_queue_name)]; lg_pos := 'after'; if array_lower(_args, 1) is not null then lg_args := lg_args || _args; end if; if 'B' = any(_tgflags) then lg_pos := 'before'; end if; if 'A' = any(_tgflags) then lg_pos := 'after'; end if; if 'Q' = any(_tgflags) then lg_func := 'pgq.sqltriga'; end if; if 'L' = any(_tgflags) then lg_func := 'pgq.logutriga'; end if; if 'I' = any(_tgflags) then lg_event := lg_event || ' or insert'; end if; if 'U' = any(_tgflags) then lg_event := lg_event || ' or update'; end if; if 'D' = any(_tgflags) then lg_event := lg_event || ' or delete'; end if; if 'S' = any(_tgflags) then _skip := true; end if; if i_node_type = 'leaf' then -- on weird leafs the trigger funcs may not exist perform 1 from pg_proc p join pg_namespace n on (n.oid = p.pronamespace) where n.nspname = 'pgq' and p.proname in ('logutriga', 'sqltriga'); if not found then select 201, 'Trigger not created' into ret_code, ret_note; return; end if; -- on regular leaf, install deny trigger _extra_args := array_append(_extra_args, quote_literal('deny')); end 
if; if _skip or lg_pos = 'after' then -- get count and name of existing skip triggers select count(*), min(t.tgname) into _skip_trg_count, _skip_trg_name from pg_catalog.pg_trigger t where t.tgrelid = londiste.find_table_oid(i_dest_table) and position(E'\\000SKIP\\000'::bytea in tgargs) > 0; end if; -- make sure new trigger won't be effectively inactive if lg_pos = 'after' and _skip_trg_count > 0 then select 403, 'AFTER trigger cannot work with SKIP trigger(s)' into ret_code, ret_note; return; end if; -- if skip param given, rename previous skip triggers and prefix current if _skip then -- if no previous skip triggers, prefix name and add SKIP to args if _skip_trg_count = 0 then trigger_name := _skip_prefix || trigger_name; lg_args := array_append(lg_args, quote_literal('SKIP')); -- if one previous skip trigger, check it's prefix and -- do not use SKIP on current trigger elsif _skip_trg_count = 1 then -- if not prefixed then rename if position(_skip_prefix in _skip_trg_name) != 1 then sql := 'alter trigger ' || _skip_trg_name || ' on ' || londiste.quote_fqname(i_dest_table) || ' rename to ' || _skip_prefix || _skip_trg_name; execute sql; end if; else select 405, 'Multiple SKIP triggers' into ret_code, ret_note; return; end if; end if; -- create Ins/Upd/Del trigger if it does not exists already select t.tgargs from pg_catalog.pg_trigger t where t.tgrelid = londiste.find_table_oid(i_dest_table) and t.tgname = trigger_name into _old_tgargs; if found then _new_tgargs := decode(lg_args[1], 'escape'); for i in 2 .. 
array_upper(lg_args, 1) loop _new_tgargs := _new_tgargs || E'\\000'::bytea || decode(lg_args[i], 'escape'); end loop; if _old_tgargs is distinct from _new_tgargs then sql := 'drop trigger if exists ' || quote_ident(trigger_name) || ' on ' || londiste.quote_fqname(i_dest_table); execute sql; end if; end if; if not found or _old_tgargs is distinct from _new_tgargs then if _no_triggers then select 201, 'Trigger not created' into ret_code, ret_note; return; end if; -- finalize event lg_event := substr(lg_event, 4); -- remove ' or ' if lg_event = '' then lg_event := 'insert or update or delete'; end if; -- create trigger lg_args := lg_args || _extra_args; sql := 'create trigger ' || quote_ident(trigger_name) || ' ' || lg_pos || ' ' || lg_event || ' on ' || londiste.quote_fqname(i_dest_table) || ' for each row execute procedure ' || lg_func || '(' || array_to_string(lg_args, ', ') || ')'; execute sql; end if; -- create truncate trigger if it does not exists already show server_version_num into pgversion; if pgversion >= 80400 then trunctrg_name := '_londiste_' || i_queue_name || '_truncate'; perform 1 from pg_catalog.pg_trigger where tgrelid = londiste.find_table_oid(i_dest_table) and tgname = trunctrg_name; if not found then _extra_args := quote_literal(i_queue_name) || _extra_args; sql := 'create trigger ' || quote_ident(trunctrg_name) || ' after truncate on ' || londiste.quote_fqname(i_dest_table) || ' for each statement execute procedure pgq.sqltriga(' || array_to_string(_extra_args, ', ') || ')'; execute sql; end if; end if; select 200, 'OK' into ret_code, ret_note; return; end; $$ language plpgsql; skytools-3.2.6/sql/londiste/functions/londiste.local_add_table.sql0000644000000000000000000003477612426435645022375 0ustar create or replace function londiste.local_add_table( in i_queue_name text, in i_table_name text, in i_trg_args text[], in i_table_attrs text, in i_dest_table text, out ret_code int4, out ret_note text) as $$ -- 
---------------------------------------------------------------------- -- Function: londiste.local_add_table(5) -- -- Register table on Londiste node, with customizable trigger args. -- -- Parameters: -- i_queue_name - queue name -- i_table_name - table name -- i_trg_args - args to trigger, or magic parameters. -- i_table_attrs - args to python handler -- i_dest_table - actual name of destination table (NULL if same) -- -- Trigger args: -- See documentation for pgq triggers. -- -- Magic parameters: -- no_triggers - skip trigger creation -- skip_truncate - set 'skip_truncate' table attribute -- expect_sync - set table state to 'ok' -- tgflags=X - trigger creation flags -- merge_all - merge table from all sources. required for -- multi-source table -- no_merge - do not merge tables from different sources -- skip - create skip trigger. same as S flag -- virtual_table - skips structure check and trigger creation -- -- Trigger creation flags (default: AIUDL): -- I - ON INSERT -- U - ON UPDATE -- D - ON DELETE -- Q - use pgq.sqltriga() as trigger function -- L - use pgq.logutriga() as trigger function -- B - BEFORE -- A - AFTER -- S - SKIP -- -- Example: -- > londiste.local_add_table('q', 'tbl', array['tgflags=BI', 'SKIP', 'pkey=col1,col2']) -- -- Returns: -- 200 - Ok -- 301 - Warning, trigger exists that will fire before londiste one -- 400 - No such set -- 410 - Table already exists but with different table_attrs ------------------------------------------------------------------------ declare col_types text; fq_table_name text; new_state text; pgversion int; logtrg_previous text; trigger_name text; tbl record; i integer; j integer; arg text; _node record; _tbloid oid; _combined_queue text; _combined_table text; _table_attrs text := i_table_attrs; -- check local tables from all sources _queue_name text; _local boolean; -- argument flags _expect_sync boolean := false; _merge_all boolean := false; _no_merge boolean := false; _virtual_table boolean := false; _dest_table 
text; _table_name2 text; _desc text; begin -------- i_trg_args ARGUMENTS PARSING (TODO: use different input param for passing extra options that have nothing to do with trigger) if array_lower(i_trg_args, 1) is not null then for i in array_lower(i_trg_args, 1) .. array_upper(i_trg_args, 1) loop arg := i_trg_args[i]; if arg = 'expect_sync' then _expect_sync := true; elsif arg = 'skip_truncate' then _table_attrs := coalesce(_table_attrs || '&skip_truncate=1', 'skip_truncate=1'); elsif arg = 'merge_all' then _merge_all = true; elsif arg = 'no_merge' then _no_merge = true; elsif arg = 'virtual_table' then _virtual_table := true; _expect_sync := true; -- do not copy end if; end loop; end if; if _merge_all and _no_merge then select 405, 'Cannot use merge-all and no-merge together' into ret_code, ret_note; return; end if; fq_table_name := londiste.make_fqname(i_table_name); _dest_table := londiste.make_fqname(coalesce(i_dest_table, i_table_name)); if _dest_table = fq_table_name then _desc := fq_table_name; else _desc := fq_table_name || '(' || _dest_table || ')'; end if; -------- TABLE STRUCTURE CHECK if not _virtual_table then _tbloid := londiste.find_table_oid(_dest_table); if _tbloid is null then select 404, 'Table does not exist: ' || _desc into ret_code, ret_note; return; end if; col_types := londiste.find_column_types(_dest_table); if position('k' in col_types) < 1 then -- allow missing primary key in case of combined table where -- pkey was removed by londiste perform 1 from londiste.table_info t, pgq_node.node_info n_this, pgq_node.node_info n_other where n_this.queue_name = i_queue_name and n_other.combined_queue = n_this.combined_queue and n_other.queue_name <> n_this.queue_name and t.queue_name = n_other.queue_name and coalesce(t.dest_table, t.table_name) = _dest_table and t.dropped_ddl is not null; if not found then select 400, 'Primary key missing on table: ' || _desc into ret_code, ret_note; return; end if; end if; end if; -------- TABLE REGISTRATION LOGIC 
select * from pgq_node.get_node_info(i_queue_name) into _node; if not found or _node.ret_code >= 400 then select 400, 'No such set: ' || i_queue_name into ret_code, ret_note; return; end if; select merge_state, local, table_attrs into tbl from londiste.table_info where queue_name = i_queue_name and table_name = fq_table_name; if not found then -- add to set on root if _node.node_type = 'root' then select f.ret_code, f.ret_note into ret_code, ret_note from londiste.global_add_table(i_queue_name, i_table_name) f; if ret_code <> 200 then return; end if; else select 404, 'Table not available on queue: ' || _desc into ret_code, ret_note; return; end if; -- reload info select merge_state, local, table_attrs into tbl from londiste.table_info where queue_name = i_queue_name and table_name = fq_table_name; end if; if tbl.local then if tbl.table_attrs is distinct from _table_attrs then select 410, 'Table ' || _desc || ' already added, but with different args: ' || coalesce(tbl.table_attrs, '') into ret_code, ret_note; else select 200, 'Table already added: ' || _desc into ret_code, ret_note; end if; return; end if; if _node.node_type = 'root' then new_state := 'ok'; perform londiste.root_notify_change(i_queue_name, 'londiste.add-table', fq_table_name); elsif _node.node_type = 'leaf' and _node.combined_type = 'branch' then new_state := 'ok'; elsif _expect_sync then new_state := 'ok'; else new_state := NULL; end if; update londiste.table_info set local = true, merge_state = new_state, table_attrs = coalesce(_table_attrs, table_attrs), dest_table = nullif(_dest_table, fq_table_name) where queue_name = i_queue_name and table_name = fq_table_name; if not found then raise exception 'lost table: %', fq_table_name; end if; -- merge all table sources on leaf if _node.node_type = 'leaf' and not _no_merge then for _queue_name, _table_name2, _local in select t2.queue_name, t2.table_name, t2.local from londiste.table_info t join pgq_node.node_info n on (n.queue_name = t.queue_name) left 
join pgq_node.node_info n2 on (n2.combined_queue = n.combined_queue or (n2.combined_queue is null and n.combined_queue is null)) left join londiste.table_info t2 on (t2.queue_name = n2.queue_name and coalesce(t2.dest_table, t2.table_name) = coalesce(t.dest_table, t.table_name)) where t.queue_name = i_queue_name and t.table_name = fq_table_name and t2.queue_name != i_queue_name -- skip self loop -- if table from some other source is already marked as local, -- raise error if _local and coalesce(new_state, 'x') <> 'ok' then select 405, 'Found local table '|| _desc || ' in queue ' || _queue_name || ', use remove-table first to remove all previous ' || 'table subscriptions' into ret_code, ret_note; return; end if; -- when table comes from multiple sources, merge_all switch is -- required if not _merge_all and coalesce(new_state, 'x') <> 'ok' then select 405, 'Found multiple sources for table '|| _desc || ', use merge-all or no-merge to continue' into ret_code, ret_note; return; end if; update londiste.table_info set local = true, merge_state = new_state, table_attrs = coalesce(_table_attrs, table_attrs) where queue_name = _queue_name and table_name = _table_name2; if not found then raise exception 'lost table: % on queue %', _table_name2, _queue_name; end if; end loop; -- if this node has combined_queue, add table there too -- note: we need to keep both table_name/dest_table values select n2.queue_name, t.table_name from pgq_node.node_info n1 join pgq_node.node_info n2 on (n2.queue_name = n1.combined_queue) left join londiste.table_info t on (t.queue_name = n2.queue_name and t.table_name = fq_table_name and t.local) where n1.queue_name = i_queue_name and n2.node_type = 'root' into _combined_queue, _combined_table; if found and _combined_table is null then select f.ret_code, f.ret_note from londiste.local_add_table(_combined_queue, fq_table_name, i_trg_args, _table_attrs, _dest_table) f into ret_code, ret_note; if ret_code >= 300 then return; end if; end if; end if; -- 
create trigger select f.ret_code, f.ret_note, f.trigger_name from londiste.create_trigger(i_queue_name, fq_table_name, i_trg_args, _dest_table, _node.node_type) f into ret_code, ret_note, trigger_name; if ret_code > 299 then ret_note := 'Trigger creation failed for table ' || _desc || ': ' || ret_note; return; elsif ret_code = 201 then select 200, 'Table added with no triggers: ' || _desc into ret_code, ret_note; return; end if; -- Check that no trigger exists on the target table that will get fired -- before londiste one (this could have londiste replicate data out-of-order) -- -- Don't report all the trigger names, 8.3 does not have array_accum available. show server_version_num into pgversion; if pgversion >= 90000 then select tg.tgname into logtrg_previous from pg_class r join pg_trigger tg on (tg.tgrelid = r.oid) where r.oid = londiste.find_table_oid(_dest_table) and not tg.tgisinternal and tg.tgname < trigger_name::name -- per-row AFTER trigger and (tg.tgtype & 3) = 1 -- bits: 0:ROW, 1:BEFORE -- current londiste and not londiste.is_replica_func(tg.tgfoid) -- old londiste and substring(tg.tgname from 1 for 10) != '_londiste_' and substring(tg.tgname from char_length(tg.tgname) - 6) != '_logger' order by 1 limit 1; else select tg.tgname into logtrg_previous from pg_class r join pg_trigger tg on (tg.tgrelid = r.oid) where r.oid = londiste.find_table_oid(_dest_table) and not tg.tgisconstraint and tg.tgname < trigger_name::name -- per-row AFTER trigger and (tg.tgtype & 3) = 1 -- bits: 0:ROW, 1:BEFORE -- current londiste and not londiste.is_replica_func(tg.tgfoid) -- old londiste and substring(tg.tgname from 1 for 10) != '_londiste_' and substring(tg.tgname from char_length(tg.tgname) - 6) != '_logger' order by 1 limit 1; end if; if logtrg_previous is not null then select 301, 'Table added: ' || _desc || ', but londiste trigger is not first: ' || logtrg_previous into ret_code, ret_note; return; end if; select 200, 'Table added: ' || _desc into ret_code, ret_note; 
return; end; $$ language plpgsql; create or replace function londiste.local_add_table( in i_queue_name text, in i_table_name text, in i_trg_args text[], in i_table_attrs text, out ret_code int4, out ret_note text) as $$ -- ---------------------------------------------------------------------- -- Function: londiste.local_add_table(4) -- -- Register table on Londiste node. -- ---------------------------------------------------------------------- begin select f.ret_code, f.ret_note into ret_code, ret_note from londiste.local_add_table(i_queue_name, i_table_name, i_trg_args, i_table_attrs, null) f; return; end; $$ language plpgsql; create or replace function londiste.local_add_table( in i_queue_name text, in i_table_name text, in i_trg_args text[], out ret_code int4, out ret_note text) as $$ -- ---------------------------------------------------------------------- -- Function: londiste.local_add_table(3) -- -- Register table on Londiste node. -- ---------------------------------------------------------------------- begin select f.ret_code, f.ret_note into ret_code, ret_note from londiste.local_add_table(i_queue_name, i_table_name, i_trg_args, null) f; return; end; $$ language plpgsql; create or replace function londiste.local_add_table( in i_queue_name text, in i_table_name text, out ret_code int4, out ret_note text) as $$ -- ---------------------------------------------------------------------- -- Function: londiste.local_add_table(2) -- -- Register table on Londiste node. 
-- ---------------------------------------------------------------------- begin select f.ret_code, f.ret_note into ret_code, ret_note from londiste.local_add_table(i_queue_name, i_table_name, null) f; return; end; $$ language plpgsql strict; skytools-3.2.6/sql/londiste/functions/londiste.global_add_table.sql0000644000000000000000000000363412426435645022530 0ustar create or replace function londiste.global_add_table( in i_queue_name text, in i_table_name text, out ret_code int4, out ret_note text) as $$ -- ---------------------------------------------------------------------- -- Function: londiste.global_add_table(2) -- -- Register table on Londiste set. -- -- This means its available from root, events for it appear -- in queue and nodes can attach to it. -- -- Called by: -- on root - londiste.local_add_table() -- elsewhere - londiste consumer when receives new table event -- -- Returns: -- 200 - Ok -- 400 - No such set -- ---------------------------------------------------------------------- declare fq_table_name text; _cqueue text; begin fq_table_name := londiste.make_fqname(i_table_name); select combined_queue into _cqueue from pgq_node.node_info where queue_name = i_queue_name for update; if not found then select 400, 'No such queue: ' || i_queue_name into ret_code, ret_note; return; end if; perform 1 from londiste.table_info where queue_name = i_queue_name and table_name = fq_table_name; if found then select 200, 'Table already added: ' || fq_table_name into ret_code, ret_note; return; end if; insert into londiste.table_info (queue_name, table_name) values (i_queue_name, fq_table_name); select 200, 'Table added: ' || i_table_name into ret_code, ret_note; -- let the combined node know about it too if _cqueue is not null then perform londiste.global_add_table(_cqueue, i_table_name); end if; return; exception -- seems the row was added from parallel connection (setup vs. 
replay) when unique_violation then select 200, 'Table already added: ' || i_table_name into ret_code, ret_note; return; end; $$ language plpgsql strict; skytools-3.2.6/sql/londiste/functions/londiste.get_seq_list.sql0000644000000000000000000000161312426435645021766 0ustar create or replace function londiste.get_seq_list( in i_queue_name text, out seq_name text, out last_value int8, out local boolean) returns setof record as $$ -- ---------------------------------------------------------------------- -- Function: londiste.get_seq_list(1) -- -- Returns registered seqs on this Londiste node. -- -- Result fiels: -- seq_name - fully qualified name of sequence -- last_value - last globally published value -- local - is locally registered -- ---------------------------------------------------------------------- declare rec record; begin for seq_name, last_value, local in select s.seq_name, s.last_value, s.local from londiste.seq_info s where s.queue_name = i_queue_name order by s.nr, s.seq_name loop return next; end loop; return; end; $$ language plpgsql strict; skytools-3.2.6/sql/londiste/functions/londiste.root_notify_change.sql0000644000000000000000000000124212426435645023162 0ustar create or replace function londiste.root_notify_change(i_queue_name text, i_ev_type text, i_ev_data text) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: londiste.root_notify_change(3) -- -- Send event about change in root downstream. 
-- ---------------------------------------------------------------------- declare que text; ntype text; begin if not coalesce(pgq_node.is_root_node(i_queue_name), false) then raise exception 'only root node can send events'; end if; perform pgq.insert_event(i_queue_name, i_ev_type, i_ev_data); return 1; end; $$ language plpgsql; skytools-3.2.6/sql/londiste/functions/londiste.find_table_oid.sql0000644000000000000000000000420612426435645022227 0ustar drop function if exists londiste.find_seq_oid(text); drop function if exists londiste.find_table_oid(text); drop function if exists londiste.find_rel_oid(text, text); create or replace function londiste.find_rel_oid(i_fqname text, i_kind text) returns oid as $$ -- ---------------------------------------------------------------------- -- Function: londiste.find_rel_oid(2) -- -- Find pg_class row oid. -- -- Parameters: -- i_fqname - fq object name -- i_kind - relkind value -- -- Returns: -- oid or exception of not found -- ---------------------------------------------------------------------- declare res oid; pos integer; schema text; name text; begin pos := position('.' in i_fqname); if pos > 0 then schema := substring(i_fqname for pos - 1); name := substring(i_fqname from pos + 1); else schema := 'public'; name := i_fqname; end if; select c.oid into res from pg_namespace n, pg_class c where c.relnamespace = n.oid and c.relkind = i_kind and n.nspname = schema and c.relname = name; if not found then res := NULL; end if; return res; end; $$ language plpgsql strict stable; create or replace function londiste.find_table_oid(tbl text) returns oid as $$ -- ---------------------------------------------------------------------- -- Function: londiste.find_table_oid(1) -- -- Find table oid based on fqname. 
-- -- Parameters: -- tbl - fqname -- -- Returns: -- oid -- ---------------------------------------------------------------------- begin return londiste.find_rel_oid(tbl, 'r'); end; $$ language plpgsql strict stable; create or replace function londiste.find_seq_oid(seq text) returns oid as $$ -- ---------------------------------------------------------------------- -- Function: londiste.find_seq_oid(1) -- -- Find sequence oid based on fqname. -- -- Parameters: -- seq - fqname -- -- Returns: -- oid -- ---------------------------------------------------------------------- begin return londiste.find_rel_oid(seq, 'S'); end; $$ language plpgsql strict stable; skytools-3.2.6/sql/londiste/functions/londiste.find_column_types.sql0000644000000000000000000000221212426435645023021 0ustar create or replace function londiste.find_column_types(tbl text) returns text as $$ -- ---------------------------------------------------------------------- -- Function: londiste.find_column_types(1) -- -- Returns columnt type string for logtriga(). -- -- Parameters: -- tbl - fqname -- -- Returns: -- String of 'kv'. 
-- ---------------------------------------------------------------------- declare res text; col record; tbl_oid oid; begin tbl_oid := londiste.find_table_oid(tbl); res := ''; for col in SELECT CASE WHEN k.attname IS NOT NULL THEN 'k' ELSE 'v' END AS type FROM pg_attribute a LEFT JOIN ( SELECT k.attname FROM pg_index i, pg_attribute k WHERE i.indrelid = tbl_oid AND k.attrelid = i.indexrelid AND i.indisprimary AND k.attnum > 0 AND NOT k.attisdropped ) k ON (k.attname = a.attname) WHERE a.attrelid = tbl_oid AND a.attnum > 0 AND NOT a.attisdropped ORDER BY a.attnum loop res := res || col.type; end loop; return res; end; $$ language plpgsql strict stable; skytools-3.2.6/sql/londiste/functions/londiste.local_set_skip_truncate.sql0000644000000000000000000000153112426435645024203 0ustar create or replace function londiste.local_set_skip_truncate( in i_queue_name text, in i_table text, in i_value bool, out ret_code int4, out ret_note text) returns record as $$ -- ---------------------------------------------------------------------- -- Function: londiste.local_set_skip_truncate(3) -- -- Change skip_truncate flag for table. -- ---------------------------------------------------------------------- begin update londiste.table_info set skip_truncate = i_value where queue_name = i_queue_name and table_name = i_table; if found then select 200, 'skip_truncate=' || i_value::text into ret_code, ret_note; else select 404, 'table not found: ' || i_table into ret_code, ret_note; end if; return; end; $$ language plpgsql; skytools-3.2.6/sql/londiste/functions/londiste.local_set_table_struct.sql0000644000000000000000000000175312426435645024031 0ustar create or replace function londiste.local_set_table_struct( in i_queue_name text, in i_table_name text, in i_dropped_ddl text, out ret_code int4, out ret_note text) as $$ -- ---------------------------------------------------------------------- -- Function: londiste.local_set_table_struct(3) -- -- Store dropped table struct temporarily. 
-- -- Parameters: -- i_queue_name - cascaded queue name -- i_table - table name -- i_dropped_ddl - merge state -- ---------------------------------------------------------------------- begin update londiste.table_info set dropped_ddl = i_dropped_ddl where queue_name = i_queue_name and table_name = i_table_name and local; if found then select 200, 'Table struct stored' into ret_code, ret_note; else select 404, 'no such local table: '||i_table_name into ret_code, ret_note; end if; return; end; $$ language plpgsql; skytools-3.2.6/sql/londiste/functions/londiste.quote_fqname.sql0000644000000000000000000000163312426435645021772 0ustar create or replace function londiste.quote_fqname(i_name text) returns text as $$ -- ---------------------------------------------------------------------- -- Function: londiste.quote_fqname(1) -- -- Quete fully-qualified object name for SQL. -- -- First dot is taken as schema separator. -- -- If schema is missing, 'public' is assumed. -- -- Parameters: -- i_name - fully qualified object name. -- -- Returns: -- Quoted name. -- ---------------------------------------------------------------------- declare res text; pos integer; s text; n text; begin pos := position('.' in i_name); if pos > 0 then s := substring(i_name for pos - 1); n := substring(i_name from pos + 1); else s := 'public'; n := i_name; end if; return quote_ident(s) || '.' || quote_ident(n); end; $$ language plpgsql strict immutable; skytools-3.2.6/sql/londiste/functions/londiste.list_obsolete_partitions.sql0000644000000000000000000000403412426435645024427 0ustar create or replace function londiste.list_obsolete_partitions ( in i_parent_table text, in i_retention_period interval, in i_partition_period text ) returns setof text as $$ ------------------------------------------------------------------------------- -- Function: londiste.list_obsolete_partitions(3) -- -- List obsolete partitions of partition-by-date parent table. 
-- -- Parameters: -- i_parent_table Master table from which partitions are inherited -- i_retention_period How long to keep partitions around -- i_partition_period One of: year, month, day, hour -- -- Returns: -- Names of partitions to be dropped ------------------------------------------------------------------------------- declare _schema text not null := split_part (i_parent_table, '.', 1); _table text not null := split_part (i_parent_table, '.', 2); _part text; _expr text; _dfmt text; begin if i_partition_period in ('year', 'yearly') then _expr := '_[0-9]{4}'; _dfmt := '_YYYY'; elsif i_partition_period in ('month', 'monthly') then _expr := '_[0-9]{4}_[0-9]{2}'; _dfmt := '_YYYY_MM'; elsif i_partition_period in ('day', 'daily') then _expr := '_[0-9]{4}_[0-9]{2}_[0-9]{2}'; _dfmt := '_YYYY_MM_DD'; elsif i_partition_period in ('hour', 'hourly') then _expr := '_[0-9]{4}_[0-9]{2}_[0-9]{2}_[0-9]{2}'; _dfmt := '_YYYY_MM_DD_HH24'; else raise exception 'not supported i_partition_period: %', i_partition_period; end if; if length (_table) = 0 then _table := _schema; _schema := 'public'; end if; for _part in select quote_ident (t.schemaname) ||'.'|| quote_ident (t.tablename) from pg_catalog.pg_tables t where t.schemaname = _schema and t.tablename ~ ('^'|| _table || _expr ||'$') and t.tablename < _table || to_char (now() - i_retention_period, _dfmt) order by 1 loop return next _part; end loop; end; $$ language plpgsql; skytools-3.2.6/sql/londiste/functions/londiste.create_partition.sql0000644000000000000000000002057412426435645022647 0ustar create or replace function londiste.create_partition( i_table text, i_part text, i_pkeys text, i_part_field text, i_part_time timestamptz, i_part_period text ) returns int as $$ ------------------------------------------------------------------------ -- Function: londiste.create_partition -- -- Creates inherited child table if it does not exist by copying parent table's structure. 
-- Locks londiste.table_info table to avoid parallel creation of any partitions. -- -- Elements that are copied over by "LIKE x INCLUDING ALL": -- * Defaults -- * Constraints -- * Indexes -- * Storage options (9.0+) -- * Comments (9.0+) -- -- Elements that are copied over manually because LIKE ALL does not support them: -- * Grants -- * Triggers -- * Rules -- -- Parameters: -- i_table - name of parent table -- i_part - name of partition table to create -- i_pkeys - primary key fields (comma separated, used to create constraint). -- i_part_field - field used to partition table (when not partitioned by field, value is NULL) -- i_part_time - partition time -- i_part_period - period of partitioned data, current possible values are 'hour', 'day', 'month' and 'year' -- -- Example: -- select londiste.create_partition('aggregate.user_call_monthly', 'aggregate.user_call_monthly_2010_01', 'key_user', 'period_start', '2010-01-10 11:00'::timestamptz, 'month'); -- ------------------------------------------------------------------------ declare chk_start text; chk_end text; part_start timestamptz; part_end timestamptz; parent_schema text; parent_name text; parent_oid oid; part_schema text; part_name text; owner name; pos int4; fq_table text; fq_part text; q_grantee text; g record; r record; tg record; sql text; pgver integer; r_oldtbl text; r_extra text; r_sql text; begin if i_table is null or i_part is null then raise exception 'need table and part'; end if; -- load postgres version (XYYZZ). show server_version_num into pgver; -- parent table schema and name + quoted name pos := position('.' in i_table); if pos > 0 then parent_schema := substring(i_table for pos - 1); parent_name := substring(i_table from pos + 1); else parent_schema := 'public'; parent_name := i_table; end if; fq_table := quote_ident(parent_schema) || '.' || quote_ident(parent_name); -- part table schema and name + quoted name pos := position('.' 
in i_part); if pos > 0 then part_schema := substring(i_part for pos - 1); part_name := substring(i_part from pos + 1); else part_schema := 'public'; part_name := i_part; end if; fq_part := quote_ident(part_schema) || '.' || quote_ident(part_name); -- allow only single creation at a time, without affecting DML operations -- (changed from locking parent table to avoid deadlocks from concurrent workers) execute 'lock table londiste.table_info in share update exclusive mode'; parent_oid := fq_table::regclass::oid; -- check if part table exists perform 1 from pg_class t, pg_namespace s where t.relnamespace = s.oid and s.nspname = part_schema and t.relname = part_name; if found then return 0; end if; -- need to use 'like' to get indexes sql := 'create table ' || fq_part || ' (like ' || fq_table; if pgver >= 90000 then sql := sql || ' including all'; else sql := sql || ' including indexes including constraints including defaults'; end if; sql := sql || ') inherits (' || fq_table || ')'; execute sql; -- find out parent table owner select o.rolname into owner from pg_class t, pg_namespace s, pg_roles o where t.relnamespace = s.oid and s.nspname = parent_schema and t.relname = parent_name and t.relowner = o.oid; -- set proper part table ownership if owner != user then sql = 'alter table ' || fq_part || ' owner to ' || quote_ident(owner); execute sql; end if; -- extra check constraint if i_part_field != '' then part_start := date_trunc(i_part_period, i_part_time); part_end := part_start + ('1 ' || i_part_period)::interval; chk_start := quote_literal(to_char(part_start, 'YYYY-MM-DD HH24:MI:SS')); chk_end := quote_literal(to_char(part_end, 'YYYY-MM-DD HH24:MI:SS')); sql := 'alter table '|| fq_part || ' add check (' || quote_ident(i_part_field) || ' >= ' || chk_start || ' and ' || quote_ident(i_part_field) || ' < ' || chk_end || ')'; execute sql; end if; -- load grants from parent table for g in select grantor, grantee, privilege_type, is_grantable from 
information_schema.table_privileges where table_schema = parent_schema and table_name = parent_name loop if g.grantee = 'PUBLIC' then q_grantee = 'public'; else q_grantee := quote_ident(g.grantee); end if; sql := 'grant ' || g.privilege_type || ' on ' || fq_part || ' to ' || q_grantee; if g.is_grantable = 'YES' then sql := sql || ' with grant option'; end if; execute sql; end loop; -- generate triggers info query sql := 'SELECT tgname, tgenabled,' || ' pg_catalog.pg_get_triggerdef(oid) as tgdef' || ' FROM pg_catalog.pg_trigger ' || ' WHERE tgrelid = ' || parent_oid::text || ' AND '; if pgver >= 90000 then sql := sql || ' NOT tgisinternal'; else sql := sql || ' NOT tgisconstraint'; end if; -- copy triggers for tg in execute sql loop sql := regexp_replace(tg.tgdef, E' ON ([[:alnum:]_.]+|"([^"]|"")+")+ ', ' ON ' || fq_part || ' '); if sql = tg.tgdef then raise exception 'Failed to reconstruct the trigger: %', sql; end if; execute sql; if tg.tgenabled = 'O' then -- standard mode r_extra := NULL; elsif tg.tgenabled = 'D' then r_extra := ' DISABLE TRIGGER '; elsif tg.tgenabled = 'A' then r_extra := ' ENABLE ALWAYS TRIGGER '; elsif tg.tgenabled = 'R' then r_extra := ' ENABLE REPLICA TRIGGER '; else raise exception 'Unknown trigger mode: %', tg.tgenabled; end if; if r_extra is not null then sql := 'ALTER TABLE ' || fq_part || r_extra || quote_ident(tg.tgname); execute sql; end if; end loop; -- copy rules for r in select rw.rulename, rw.ev_enabled, pg_catalog.pg_get_ruledef(rw.oid) as definition from pg_catalog.pg_rewrite rw where rw.ev_class = parent_oid and rw.rulename <> '_RETURN'::name loop -- try to skip rule name r_extra := 'CREATE RULE ' || quote_ident(r.rulename) || ' AS'; r_sql := substr(r.definition, 1, char_length(r_extra)); if r_sql = r_extra then r_sql := substr(r.definition, char_length(r_extra)); else raise exception 'failed to match rule name'; end if; -- no clue what name was used in defn, so find it from sql r_oldtbl := substring(r_sql from ' TO 
(([[:alnum:]_.]+|"([^"]+|"")+")+)[[:space:]]'); if char_length(r_oldtbl) > 0 then sql := replace(r.definition, r_oldtbl, fq_part); else raise exception 'failed to find original table name'; end if; execute sql; -- rule flags r_extra := NULL; if r.ev_enabled = 'R' then r_extra = ' ENABLE REPLICA RULE '; elsif r.ev_enabled = 'A' then r_extra = ' ENABLE ALWAYS RULE '; elsif r.ev_enabled = 'D' then r_extra = ' DISABLE RULE '; elsif r.ev_enabled <> 'O' then raise exception 'unknown rule option: %', r.ev_enabled; end if; if r_extra is not null then sql := 'ALTER TABLE ' || fq_part || r_extra || quote_ident(r.rulename); execute sql; end if; end loop; return 1; end; $$ language plpgsql; skytools-3.2.6/sql/londiste/functions/londiste.get_table_list.sql0000644000000000000000000001444712426435645022276 0ustar drop function if exists londiste.get_table_list(text); create or replace function londiste.get_table_list( in i_queue_name text, out table_name text, out local boolean, out merge_state text, out custom_snapshot text, out table_attrs text, out dropped_ddl text, out copy_role text, out copy_pos int4, out dest_table text) returns setof record as $$ -- ---------------------------------------------------------------------- -- Function: londiste.get_table_list(1) -- -- Return info about registered tables. 
-- -- Parameters: -- i_queue_name - cascaded queue name -- -- Returns: -- table_name - fully-quelified table name -- local - does events needs to be applied to local table -- merge_state - show phase of initial copy -- custom_snapshot - remote snapshot of COPY transaction -- table_attrs - urlencoded dict of table attributes -- dropped_ddl - partition combining: temp place to put DDL -- copy_role - partition combining: how to handle copy -- copy_pos - position in parallel copy working order -- -- copy_role = lead: -- on copy start, drop indexes and store in dropped_ddl -- on copy finish change state to catching-up, then wait until copy_role turns to NULL -- catching-up: if dropped_ddl not NULL, restore them -- copy_role = wait-copy: -- on copy start wait, until role changes (to wait-replay) -- copy_role = wait-replay: -- on copy finish, tag as 'catching-up' -- wait until copy_role is NULL, then proceed -- ---------------------------------------------------------------------- begin for table_name, local, merge_state, custom_snapshot, table_attrs, dropped_ddl, dest_table in select t.table_name, t.local, t.merge_state, t.custom_snapshot, t.table_attrs, t.dropped_ddl, t.dest_table from londiste.table_info t where t.queue_name = i_queue_name order by t.nr, t.table_name loop copy_role := null; copy_pos := 0; if merge_state in ('in-copy', 'catching-up') then select f.copy_role, f.copy_pos from londiste._coordinate_copy(i_queue_name, table_name) f into copy_role, copy_pos; end if; return next; end loop; return; end; $$ language plpgsql strict stable; create or replace function londiste._coordinate_copy( in i_queue_name text, in i_table_name text, out copy_role text, out copy_pos int4) as $$ -- if the table is in middle of copy from multiple partitions, -- the copy processes need coordination. 
declare q_part1 text; q_part_ddl text; n_parts int4; n_done int4; _table_name text; n_combined_queue text; merge_state text; dest_table text; dropped_ddl text; begin copy_pos := 0; copy_role := null; select t.merge_state, t.dest_table, t.dropped_ddl, min(case when t2.local then t2.queue_name else null end) as _queue1, min(case when t2.local and t2.dropped_ddl is not null then t2.queue_name else null end) as _queue1ddl, count(case when t2.local then t2.table_name else null end) as _total, count(case when t2.local then nullif(t2.merge_state, 'in-copy') else null end) as _done, min(n.combined_queue) as _combined_queue, count(nullif(t2.queue_name < i_queue_name and t.merge_state = 'in-copy' and t2.merge_state = 'in-copy', false)) as _copy_pos from londiste.table_info t join pgq_node.node_info n on (n.queue_name = t.queue_name) left join pgq_node.node_info n2 on (n2.combined_queue = n.combined_queue or (n2.combined_queue is null and n.combined_queue is null)) left join londiste.table_info t2 on (coalesce(t2.dest_table, t2.table_name) = coalesce(t.dest_table, t.table_name) and t2.queue_name = n2.queue_name and (t2.merge_state is null or t2.merge_state != 'ok')) where t.queue_name = i_queue_name and t.table_name = i_table_name group by t.nr, t.table_name, t.local, t.merge_state, t.custom_snapshot, t.table_attrs, t.dropped_ddl, t.dest_table into merge_state, dest_table, dropped_ddl, q_part1, q_part_ddl, n_parts, n_done, n_combined_queue, copy_pos; -- q_part1, q_part_ddl, n_parts, n_done, n_combined_queue, copy_pos, dest_table -- be more robust against late joiners q_part1 := coalesce(q_part_ddl, q_part1); -- turn the logic off if no merge is happening if n_parts = 1 then q_part1 := null; end if; if q_part1 is not null then if i_queue_name = q_part1 then -- lead if merge_state = 'in-copy' then if dropped_ddl is null and n_done > 0 then -- seems late addition, let it copy with indexes copy_role := 'wait-replay'; elsif n_done < n_parts then -- show copy_role only if need to 
drop ddl or already did drop ddl copy_role := 'lead'; end if; -- make sure it cannot be made to wait copy_pos := 0; end if; if merge_state = 'catching-up' and dropped_ddl is not null then -- show copy_role only if need to wait for others if n_done < n_parts then copy_role := 'wait-replay'; end if; end if; else -- follow if merge_state = 'in-copy' then if q_part_ddl is not null then -- can copy, wait in replay until lead has applied ddl copy_role := 'wait-replay'; elsif n_done > 0 then -- ddl is not dropped, others are active, copy without touching ddl copy_role := 'wait-replay'; else -- wait for lead to drop ddl copy_role := 'wait-copy'; end if; elsif merge_state = 'catching-up' then -- show copy_role only if need to wait for lead if q_part_ddl is not null then copy_role := 'wait-replay'; end if; end if; end if; end if; return; end; $$ language plpgsql strict stable; skytools-3.2.6/sql/londiste/functions/londiste.execute_finish.sql0000644000000000000000000000247412426435645022314 0ustar create or replace function londiste.execute_finish( in i_queue_name text, in i_file_name text, out ret_code int4, out ret_note text) as $$ -- ---------------------------------------------------------------------- -- Function: londiste.execute_finish(2) -- -- Finish execution of DDL. Should be called at the -- end of the transaction that does the SQL execution. -- -- Called-by: -- Londiste setup tool on root, replay on branches/leafs. -- -- Returns: -- 200 - Proceed. -- 404 - Current entry not found, execute_start() was not called? 
-- ---------------------------------------------------------------------- declare is_root boolean; sql text; attrs text; begin is_root := pgq_node.is_root_node(i_queue_name); select execute_sql, execute_attrs into sql, attrs from londiste.applied_execute where execute_file = i_file_name; if not found then select 404, 'execute_file called without execute_start' into ret_code, ret_note; return; end if; if is_root then perform pgq.insert_event(i_queue_name, 'EXECUTE', sql, i_file_name, attrs, null, null); end if; select 200, 'Execute finished: ' || i_file_name into ret_code, ret_note; return; end; $$ language plpgsql strict; skytools-3.2.6/sql/londiste/functions/londiste.local_change_handler.sql0000644000000000000000000000461512426435645023405 0ustar create or replace function londiste.local_change_handler( in i_queue_name text, in i_table_name text, in i_trg_args text[], in i_table_attrs text, out ret_code int4, out ret_note text) as $$ ---------------------------------------------------------------------------------------------------- -- Function: londiste.local_change_handler(4) -- -- Change handler and rebuild trigger if needed -- -- Parameters: -- i_queue_name - set name -- i_table_name - table name -- i_trg_args - args to trigger -- i_table_attrs - args to python handler -- -- Returns: -- 200 - OK -- 400 - No such set -- 404 - Table not found -- ---------------------------------------------------------------------------------------------------- declare _dest_table text; _desc text; _node record; begin -- get node info select * from pgq_node.get_node_info(i_queue_name) into _node; if not found or _node.ret_code >= 400 then select 400, 'No such set: ' || i_queue_name into ret_code, ret_note; return; end if; -- update table_attrs with new handler info select f.ret_code, f.ret_note from londiste.local_set_table_attrs(i_queue_name, i_table_name, i_table_attrs) f into ret_code, ret_note; if ret_code <> 200 then return; end if; -- get destination table name for use in 
trigger creation select coalesce(ti.dest_table, i_table_name) from londiste.table_info ti where queue_name = i_queue_name and table_name = i_table_name and local into _dest_table; -- replace the trigger if needed select f.ret_code, f.ret_note from londiste.create_trigger(i_queue_name, i_table_name, i_trg_args, _dest_table, _node.node_type) f into ret_code, ret_note; if _dest_table = i_table_name then _desc := i_table_name; else _desc := i_table_name || '(' || _dest_table || ')'; end if; if ret_code > 299 then ret_note := 'Trigger creation failed for table ' || _desc || ': ' || ret_note; return; elsif ret_code = 201 then select 200, 'Table handler updated with no triggers: ' || _desc into ret_code, ret_note; return; end if; select 200, 'Handler changed for table: ' || _desc into ret_code, ret_note; return; end; $$ language plpgsql; skytools-3.2.6/sql/londiste/functions/londiste.drop_table_triggers.sql0000644000000000000000000000343312426435645023327 0ustar create or replace function londiste.drop_table_triggers( in i_queue_name text, in i_table_name text) returns void as $$ -- ---------------------------------------------------------------------- -- Function: londiste.drop_table_triggers(2) -- -- Remove Londiste triggers from table. 
-- -- Parameters: -- i_queue_name - set name -- i_table_name - table name -- -- Returns: -- 200 - OK -- 404 - Table not found -- ---------------------------------------------------------------------- declare logtrg_name text; b_queue_name bytea; _dest_table text; begin select coalesce(dest_table, table_name) from londiste.table_info t where t.queue_name = i_queue_name and t.table_name = i_table_name into _dest_table; if not found then return; end if; -- skip if no triggers found on that table perform 1 from pg_catalog.pg_trigger where tgrelid = londiste.find_table_oid(_dest_table); if not found then return; end if; -- cast to bytea b_queue_name := decode(replace(i_queue_name, E'\\', E'\\\\'), 'escape'); -- drop all replication triggers that target our queue. -- by checking trigger func and queue name there is not -- dependency on naming standard or side-storage. for logtrg_name in select tgname from pg_catalog.pg_trigger where tgrelid = londiste.find_table_oid(_dest_table) and londiste.is_replica_func(tgfoid) and octet_length(tgargs) > 0 and substring(tgargs for (position(E'\\000'::bytea in tgargs) - 1)) = b_queue_name loop execute 'drop trigger ' || quote_ident(logtrg_name) || ' on ' || londiste.quote_fqname(_dest_table); end loop; end; $$ language plpgsql strict; skytools-3.2.6/sql/londiste/sql/0000755000000000000000000000000012426435645013531 5ustar skytools-3.2.6/sql/londiste/sql/londiste_create_part.sql0000644000000000000000000000522412426435645020447 0ustar \set ECHO none set log_error_verbosity = 'terse'; set client_min_messages = 'warning'; \set ECHO all drop role if exists londiste_test_part1; drop role if exists londiste_test_part2; create group londiste_test_part1; create group londiste_test_part2; create table events ( id int4 primary key, txt text not null, ctime timestamptz not null default now(), someval int4 check (someval > 0) ); create index ctime_idx on events (ctime); create rule ignore_dups AS on insert to events where (exists (select 1 from 
events where (events.id = new.id))) do instead nothing; create or replace function "NullTrigger"() returns trigger as $$ begin return null; end; $$ language plpgsql; create trigger "Fooza" after delete on events for each row execute procedure "NullTrigger"(); alter table events enable always trigger "Fooza"; grant select,delete on events to londiste_test_part1; grant select,update,delete on events to londiste_test_part2 with grant option; grant select,insert on events to public; select londiste.create_partition('events', 'events_2011_01', 'id', 'ctime', '2011-01-01', 'month'); select londiste.create_partition('events', 'events_2011_01', 'id', 'ctime', '2011-01-01'::timestamptz, 'month'); select londiste.create_partition('events', 'events_2011_01', 'id', 'ctime', '2011-01-01'::timestamp, 'month'); select count(*) from pg_indexes where schemaname='public' and tablename = 'events_2011_01'; select count(*) from pg_constraint where conrelid = 'public.events_2011_01'::regclass; select count(*) from pg_rules where schemaname = 'public' and tablename = 'events_2011_01'; select trigger_name, event_manipulation, action_statement from information_schema.triggers where event_object_schema = 'public' and event_object_table = 'events_2011_01'; select tgenabled, pg_get_triggerdef(oid) from pg_trigger where tgrelid = 'events_2011_01'::regclass::oid; -- test weird quoting create table "Bad "" table '.' name!" ( id int4 primary key, txt text not null, ctime timestamptz not null default now(), someval int4 check (someval > 0) ); create rule "Ignore Dups" AS on insert to "Bad "" table '.' name!" where (exists (select 1 from "Bad "" table '.' name!" where ("Bad "" table '.' name!".id = new.id))) do instead nothing; alter table "Bad "" table '.' name!" 
ENABLE ALWAYS RULE "Ignore Dups"; select londiste.create_partition('public.Bad " table ''.'' name!', 'public.Bad " table ''.'' part!', 'id', 'ctime', '2011-01-01', 'month'); select count(*) from pg_rules where schemaname = 'public' and tablename ilike 'bad%'; -- \d events_2011_01 -- \dp events -- \dp events_2011_01 skytools-3.2.6/sql/londiste/sql/londiste_execute.sql0000644000000000000000000000153712426435645017623 0ustar set log_error_verbosity = 'terse'; select * from londiste.execute_start('branch_set', 'DDL-A.sql', 'drop all', false); select * from londiste.execute_start('branch_set', 'DDL-A.sql', 'drop all', false); select * from londiste.execute_finish('branch_set', 'DDL-A.sql'); select * from londiste.execute_finish('branch_set', 'DDL-A.sql'); select * from londiste.execute_finish('branch_set', 'DDL-XXX.sql'); select * from londiste.execute_start('branch_set', 'DDL-B.sql', 'drop all', true); select * from londiste.execute_start('branch_set', 'DDL-B.sql', 'drop all', true); select * from londiste.execute_start('aset', 'DDL-root.sql', 'drop all', true); select * from londiste.execute_start('aset', 'DDL-root.sql', 'drop all', true); select * from londiste.execute_finish('aset', 'DDL-root.sql'); select * from londiste.execute_finish('aset', 'DDL-root.sql'); skytools-3.2.6/sql/londiste/sql/londiste_provider.sql0000644000000000000000000000542712426435645020015 0ustar set client_min_messages = 'warning'; \set VERBOSITY 'terse' -- -- tables -- create table testdata ( id serial primary key, txt text ); create table testdata_nopk ( id serial, txt text ); select current_database(); select * from pgq_node.register_location('aset', 'rnode', 'dbname=db', false); select * from pgq_node.create_node('aset', 'root', 'rnode', 'londiste_root', null::text, null::int8, null::text); select * from londiste.local_add_table('aset', 'public.testdata_nopk'); select * from londiste.local_add_table('aset', 'public.testdata'); select tgname from pg_trigger where tgrelid = 
'public.testdata'::regclass order by 1; insert into testdata (txt) values ('test-data'); select * from londiste.get_table_list('aset'); select * from londiste.local_show_missing('aset'); select * from londiste.local_remove_table('aset', 'public.testdata'); select * from londiste.local_remove_table('aset', 'public.testdata'); select tgname from pg_trigger where tgrelid = 'public.testdata'::regclass; select * from londiste.get_table_list('aset'); select ev_id, ev_type, ev_data, ev_extra1 from pgq.event_template; select * from londiste.local_show_missing('aset'); -- trigtest create table trg_test ( id int4 primary key, txt text ); select * from londiste.local_add_table('aset', 'public.trg_test', array['ev_extra4=''test='' || txt']); select * from londiste.local_add_table('aset', 'public.trg_test'); select * from londiste.local_add_table('aset', 'public.trg_test', array['ev_extra4=''test='' || txt'], 'handler=foobar'); insert into trg_test values (1, 'data'); truncate trg_test; select ev_id, ev_type, ev_data, ev_extra1, ev_extra4 from pgq.event_template where ev_extra1 = 'public.trg_test'; select tgname from pg_trigger where tgrelid = 'public.trg_test'::regclass order by 1; delete from londiste.table_info where table_name = 'public.trg_test'; select tgname from pg_trigger where tgrelid = 'public.trg_test'::regclass order by 1; -- handler test create table hdlr_test ( id int4 primary key, txt text ); select * from londiste.local_add_table('aset', 'public.hdlr_test'); insert into hdlr_test values (1, 'data'); select * from londiste.local_change_handler('aset', 'public.hdlr_test', array['ev_extra4=''test='' || txt'], 'handler=foobar'); insert into hdlr_test values (2, 'data2'); select * from londiste.local_change_handler('aset', 'public.hdlr_test', '{}'::text[], ''); insert into hdlr_test values (3, 'data3'); truncate hdlr_test; select ev_id, ev_type, ev_data, ev_extra1, ev_extra4 from pgq.event_template where ev_extra1 = 'public.hdlr_test'; -- test proper trigger 
creation with add-table specific args select * from londiste.local_add_table('aset', 'public.trg_test', array['ev_extra4=''test='' || txt', 'expect_sync', 'skip']); insert into trg_test values (2, 'data2'); skytools-3.2.6/sql/londiste/sql/londiste_subscriber.sql0000644000000000000000000000251312426435645020317 0ustar set client_min_messages = 'warning'; \set VERBOSITY 'terse' -- -- tables -- create table slavedata ( id serial primary key, data text ); select current_database(); select * from pgq_node.register_location('branch_set', 'snode', 'dbname=db', false); select * from pgq_node.register_location('branch_set', 'pnode', 'dbname=db2', false); select * from pgq_node.create_node('branch_set', 'branch', 'snode', 'londiste_branch', 'pnode', 100, null::text); select * from londiste.local_show_missing('branch_set'); select * from londiste.local_add_table('branch_set', 'public.slavedata'); select * from londiste.global_add_table('branch_set', 'public.slavedata'); select * from londiste.local_add_table('branch_set', 'public.slavedata'); select * from londiste.global_add_table('branch_set', 'public.tmp'); select * from londiste.get_table_list('branch_set'); select * from londiste.local_set_table_state('branch_set', 'public.slavedata', null, 'in-copy'); select * from londiste.get_table_list('branch_set'); select * from londiste.global_remove_table('branch_set', 'public.tmp'); select * from londiste.local_remove_table('branch_set', 'public.slavedata'); select * from londiste.local_remove_table('branch_set', 'public.slavedata'); select * from londiste.get_table_list('branch_set'); select * from londiste.local_show_missing('branch_set'); skytools-3.2.6/sql/londiste/sql/londiste_seqs.sql0000644000000000000000000000377712426435645017144 0ustar set client_min_messages = 'warning'; \set VERBOSITY 'terse' -- -- sequences -- create sequence masterseq; create sequence slaveseq; select * from pgq_node.register_location('seqroot', 'rnode', 'dbname=db', false); select * from 
pgq_node.create_node('seqroot', 'root', 'rnode', 'londiste_root', null::text, null::int8, null::text); select * from londiste.local_add_seq('seqroot', 'masterseq'); select * from londiste.local_add_seq('seqroot', 'masterseq'); select * from londiste.root_check_seqs('seqroot'); select * from londiste.local_remove_seq('seqroot', 'masterseq'); select * from londiste.local_remove_seq('seqroot', 'masterseq'); select * from londiste.get_seq_list('seqroot'); select ev_id, ev_type, ev_data, ev_extra1 from pgq.event_template where ev_type like '%seq%'; -- subscriber select * from pgq_node.register_location('seqbranch', 'subnode', 'dbname=db', false); select * from pgq_node.register_location('seqbranch', 'rootnode', 'dbname=db', false); select * from pgq_node.create_node('seqbranch', 'branch', 'subnode', 'londiste_branch', 'rootnode', 1, null::text); select * from londiste.local_add_seq('seqbranch', 'masterseq'); select * from londiste.global_update_seq('seqbranch', 'masterseq', 5); select * from londiste.local_add_seq('seqbranch', 'masterseq'); select * from londiste.root_check_seqs('seqbranch'); select * from londiste.get_seq_list('seqbranch'); select * from londiste.local_remove_seq('seqbranch', 'masterseq'); select * from londiste.local_remove_seq('seqbranch', 'masterseq'); -- seq auto-removal create table seqtable ( id1 serial primary key, id2 bigserial not null ); select * from londiste.local_add_table('seqroot', 'seqtable'); select * from londiste.local_add_seq('seqroot', 'seqtable_id1_seq'); select * from londiste.local_add_seq('seqroot', 'seqtable_id2_seq'); select * from londiste.get_table_list('seqroot'); select * from londiste.get_seq_list('seqroot'); select * from londiste.local_remove_table('seqroot', 'seqtable'); select * from londiste.get_seq_list('seqroot'); skytools-3.2.6/sql/londiste/sql/londiste_fkeys.sql0000644000000000000000000000517712426435645017306 0ustar set log_error_verbosity = 'terse'; set client_min_messages = 'warning'; create table ref_1 ( id 
int4 primary key, val text ); create table ref_2 ( id int4 primary key, ref int4 not null references ref_1, val text ); create table ref_3 ( id int4 primary key, ref2 int4 not null references ref_2, val text ); select * from londiste.global_add_table('branch_set', 'public.ref_1'); select * from londiste.global_add_table('branch_set', 'public.ref_2'); select * from londiste.global_add_table('branch_set', 'public.ref_3'); select * from londiste.local_add_table('branch_set', 'public.ref_1'); select * from londiste.local_add_table('branch_set', 'public.ref_2'); select * from londiste.local_add_table('branch_set', 'public.ref_3'); select * from londiste.find_table_fkeys('public.ref_1'); select * from londiste.find_table_fkeys('public.ref_2'); select * from londiste.find_table_fkeys('public.ref_3'); select * from londiste.get_table_pending_fkeys('public.ref_2'); select * from londiste.get_valid_pending_fkeys('branch_set'); -- drop fkeys select * from londiste.drop_table_fkey('public.ref_2', 'ref_2_ref_fkey'); select * from londiste.find_table_fkeys('public.ref_1'); select * from londiste.find_table_fkeys('public.ref_2'); select * from londiste.find_table_fkeys('public.ref_3'); select * from londiste.drop_table_fkey('public.ref_3', 'ref_3_ref2_fkey'); -- check if dropped select * from londiste.find_table_fkeys('public.ref_1'); select * from londiste.find_table_fkeys('public.ref_2'); select * from londiste.find_table_fkeys('public.ref_3'); -- look state select * from londiste.get_table_pending_fkeys('public.ref_2'); select * from londiste.get_valid_pending_fkeys('branch_set'); -- toggle sync select * from londiste.local_set_table_state('branch_set', 'public.ref_1', null, 'ok'); select * from londiste.get_valid_pending_fkeys('branch_set'); select * from londiste.local_set_table_state('branch_set', 'public.ref_2', null, 'ok'); select * from londiste.get_valid_pending_fkeys('branch_set'); select * from londiste.local_set_table_state('branch_set', 'public.ref_3', null, 'ok'); 
select * from londiste.get_valid_pending_fkeys('branch_set'); -- restore select * from londiste.restore_table_fkey('public.ref_2', 'ref_2_ref_fkey'); select * from londiste.restore_table_fkey('public.ref_3', 'ref_3_ref2_fkey'); -- look state select * from londiste.get_table_pending_fkeys('public.ref_2'); select * from londiste.get_valid_pending_fkeys('branch_set'); select * from londiste.find_table_fkeys('public.ref_1'); select * from londiste.find_table_fkeys('public.ref_2'); select * from londiste.find_table_fkeys('public.ref_3'); skytools-3.2.6/sql/londiste/sql/init_noext.sql0000644000000000000000000000037212426435645016434 0ustar \set ECHO off set log_error_verbosity = 'terse'; set client_min_messages = 'fatal'; create language plpgsql; set client_min_messages = 'warning'; -- \i ../txid/txid.sql \i ../pgq/pgq.sql \i ../pgq_node/pgq_node.sql \i londiste.sql \set ECHO all skytools-3.2.6/sql/londiste/sql/londiste_merge.sql0000644000000000000000000001407412426435645017260 0ustar set client_min_messages = 'warning'; \set VERBOSITY 'terse' -- -- tables -- create table tblmerge ( id int4 primary key, data text ); select * from pgq_node.register_location('combined_set', 'croot', 'dbname=db', false); select * from pgq_node.create_node('combined_set', 'root', 'croot', 'londiste_croot', null, null, null); select * from pgq_node.register_location('part1_set', 'p1root', 'dbname=db', false); select * from pgq_node.register_location('part1_set', 'p1merge', 'dbname=db2', false); select * from pgq_node.create_node('part1_set', 'leaf', 'p1merge', 'londiste_p1merge', 'p1root', 100, 'combined_set'); select * from pgq_node.register_location('part2_set', 'p2root', 'dbname=db', false); select * from pgq_node.register_location('part2_set', 'p2merge', 'dbname=db2', false); select * from pgq_node.create_node('part2_set', 'leaf', 'p2merge', 'londiste_p2merge', 'p2root', 100, 'combined_set'); select * from pgq_node.register_location('part3_set', 'p3root', 'dbname=db', false); select * 
from pgq_node.register_location('part3_set', 'p3merge', 'dbname=db3', false); select * from pgq_node.create_node('part3_set', 'leaf', 'p3merge', 'londiste_p3merge', 'p3root', 100, 'combined_set'); select * from londiste.local_add_table('combined_set', 'tblmerge'); select * from londiste.global_add_table('part1_set', 'tblmerge'); select * from londiste.global_add_table('part2_set', 'tblmerge'); select * from londiste.local_add_table('part1_set', 'tblmerge', array['merge_all']); select * from londiste.get_table_list('part1_set'); select * from londiste.get_table_list('part2_set'); select * from londiste.get_table_list('combined_set'); select * from londiste.local_set_table_state('part1_set', 'public.tblmerge', null, 'in-copy'); select * from londiste.local_set_table_state('part2_set', 'public.tblmerge', null, 'in-copy'); select * from londiste.get_table_list('part1_set'); select * from londiste.get_table_list('part2_set'); select * from londiste.local_set_table_struct('part1_set', 'public.tblmerge', 'create index;'); select * from londiste.get_table_list('part1_set'); select * from londiste.get_table_list('part2_set'); select * from londiste.local_set_table_state('part2_set', 'public.tblmerge', null, 'catching-up'); select * from londiste.get_table_list('part1_set'); select * from londiste.get_table_list('part2_set'); select * from londiste.local_set_table_state('part1_set', 'public.tblmerge', null, 'catching-up'); select * from londiste.get_table_list('part1_set'); select * from londiste.get_table_list('part2_set'); select * from londiste.local_set_table_struct('part1_set', 'public.tblmerge', null); select * from londiste.get_table_list('part1_set'); select * from londiste.get_table_list('part2_set'); -- test automatic registration on combined-root select * from londiste.global_add_table('part1_set', 'tblauto'); select * from londiste.global_add_table('part2_set', 'tblauto'); select * from londiste.local_add_table('part1_set', 'tblauto', array['merge_all', 
'virtual_table'], 'handler=vtable'); select * from londiste.get_table_list('part2_set'); select * from londiste.get_table_list('combined_set'); -- -- Test all combinations on 3-node merge -- select * from londiste.global_add_table('part3_set', 'tblmerge'); \set ECHO off create table states ( state text ); insert into states values ('in-copy'); insert into states values ('!in-copy'); insert into states values ('catching-up'); insert into states values ('!catching-up'); create or replace function testmerge( in p1state text, in p2state text, in p3state text, out p1res text, out p2res text, out p3res text) as $$ declare p1ddl text; p2ddl text; p3ddl text; tbl text = 'public.tblmerge'; begin if position('!' in p1state) > 0 then p1ddl := 'x'; end if; if position('!' in p2state) > 0 then p2ddl := 'x'; end if; if position('!' in p3state) > 0 then p3ddl := 'x'; end if; update londiste.table_info set merge_state = replace(p1state, '!', ''), dropped_ddl = p1ddl, local = true where table_name = tbl and queue_name = 'part1_set'; update londiste.table_info set merge_state = replace(p2state, '!', ''), dropped_ddl = p2ddl, local = true where table_name = tbl and queue_name = 'part2_set'; update londiste.table_info set merge_state = replace(p3state, '!', ''), dropped_ddl = p3ddl, local = true where table_name = tbl and queue_name = 'part3_set'; select coalesce(copy_role, 'NULL') from londiste.get_table_list('part1_set') where table_name = tbl into p1res; select coalesce(copy_role, 'NULL') from londiste.get_table_list('part2_set') where table_name = tbl into p2res; select coalesce(copy_role, 'NULL') from londiste.get_table_list('part3_set') where table_name = tbl into p3res; return; end; $$ language plpgsql; create function testmatrix( out p1s text, out p2s text, out p3s text, out p1r text, out p2r text, out p3r text) returns setof record as $$ begin for p1s, p2s, p3s in select p1.state::name, p2.state::name, p3.state::name from states p1, states p2, states p3 where position('!' 
in p1.state) + position('!' in p2.state) + position('!' in p3.state) < 2 order by 1,2,3 loop select * from testmerge(p1s, p2s, p3s) into p1r, p2r, p3r; return next; end loop; return; end; $$ language plpgsql; \set ECHO all select * from testmatrix(); -- test dropped ddl restore create table ddlrestore ( id int4, data1 text, data2 text ); select count(*) from pg_indexes where schemaname='public' and tablename='ddlrestore'; insert into londiste.table_info (queue_name, table_name, local, merge_state, dropped_ddl) values ('part1_set', 'public.ddlrestore', true, 'in-copy', ' ALTER TABLE ddlrestore ADD CONSTRAINT cli_pkey PRIMARY KEY (id); CREATE INDEX idx_data1 ON ddlrestore USING btree (data1); CREATE INDEX idx_data2 ON ddlrestore USING btree (data2); '); select * from londiste.local_remove_table('part1_set', 'public.ddlrestore'); select count(*) from pg_indexes where schemaname='public' and tablename='ddlrestore'; skytools-3.2.6/sql/londiste/sql/londiste_install.sql0000644000000000000000000000037212426435645017623 0ustar \set ECHO off set log_error_verbosity = 'terse'; set client_min_messages = 'fatal'; create language plpgsql; set client_min_messages = 'warning'; -- \i ../txid/txid.sql \i ../pgq/pgq.sql \i ../pgq_node/pgq_node.sql \i londiste.sql \set ECHO all skytools-3.2.6/sql/londiste/sql/londiste_leaf.sql0000644000000000000000000000262512426435645017067 0ustar set client_min_messages = 'warning'; \set VERBOSITY 'terse' select 1 from (select set_config(name, 'escape', false) as ignore from pg_settings where name = 'bytea_output') x where x.ignore = 'foo'; -- -- tables -- create table leafdata ( id serial primary key, data text ); select current_database(); select * from pgq_node.register_location('leafq', 'lq_node1', 'dbname=db', false); select * from pgq_node.register_location('leafq', 'lq_node2', 'dbname=db2', false); select * from pgq_node.create_node('leafq', 'leaf', 'lq_node2', 'londiste_leaf', 'lq_node1', 100, null::text); select * from 
londiste.local_show_missing('leafq'); select * from londiste.local_add_table('leafq', 'public.leafdata'); select * from londiste.global_add_table('leafq', 'public.leafdata'); select * from londiste.local_add_table('leafq', 'public.leafdata'); select * from londiste.global_add_table('leafq', 'public.tmp'); select * from londiste.get_table_list('leafq'); select tgname, tgargs from pg_trigger where tgrelid = 'public.leafdata'::regclass order by 1; insert into leafdata values (1, 'asd'); select * from londiste.global_remove_table('leafq', 'public.tmp'); select * from londiste.local_remove_table('leafq', 'public.leafdata'); select * from londiste.local_remove_table('leafq', 'public.leafdata'); select * from londiste.get_table_list('leafq'); select * from londiste.local_show_missing('leafq'); skytools-3.2.6/sql/londiste/sql/init_ext.sql0000644000000000000000000000103012426435645016067 0ustar \set ECHO off set log_error_verbosity = 'terse'; set client_min_messages = 'fatal'; create language plpgsql; set client_min_messages = 'warning'; create extension pgq; create extension pgq_node; \i londiste.sql \set ECHO all create extension londiste from 'unpackaged'; select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'londiste'; drop extension londiste; create extension londiste; select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'londiste'; skytools-3.2.6/sql/londiste/expected/0000755000000000000000000000000012426435645014533 5ustar skytools-3.2.6/sql/londiste/expected/londiste_subscriber.out0000644000000000000000000001072712426435645021337 0ustar set client_min_messages = 'warning'; \set VERBOSITY 'terse' -- -- tables -- create table slavedata ( id serial primary key, data text ); select current_database(); current_database ------------------ regression (1 row) select * from pgq_node.register_location('branch_set', 'snode', 'dbname=db', false); ret_code | ret_note ----------+--------------------- 200 
| Location registered (1 row) select * from pgq_node.register_location('branch_set', 'pnode', 'dbname=db2', false); ret_code | ret_note ----------+--------------------- 200 | Location registered (1 row) select * from pgq_node.create_node('branch_set', 'branch', 'snode', 'londiste_branch', 'pnode', 100, null::text); ret_code | ret_note ----------+-------------------------------------------------------------------- 200 | Node "snode" initialized for queue "branch_set" with type "branch" (1 row) select * from londiste.local_show_missing('branch_set'); obj_kind | obj_name ----------+---------- (0 rows) select * from londiste.local_add_table('branch_set', 'public.slavedata'); ret_code | ret_note ----------+------------------------------------------------ 404 | Table not available on queue: public.slavedata (1 row) select * from londiste.global_add_table('branch_set', 'public.slavedata'); ret_code | ret_note ----------+------------------------------- 200 | Table added: public.slavedata (1 row) select * from londiste.local_add_table('branch_set', 'public.slavedata'); ret_code | ret_note ----------+------------------------------- 200 | Table added: public.slavedata (1 row) select * from londiste.global_add_table('branch_set', 'public.tmp'); ret_code | ret_note ----------+------------------------- 200 | Table added: public.tmp (1 row) select * from londiste.get_table_list('branch_set'); table_name | local | merge_state | custom_snapshot | table_attrs | dropped_ddl | copy_role | copy_pos | dest_table ------------------+-------+-------------+-----------------+-------------+-------------+-----------+----------+------------ public.slavedata | t | | | | | | 0 | public.tmp | f | | | | | | 0 | (2 rows) select * from londiste.local_set_table_state('branch_set', 'public.slavedata', null, 'in-copy'); ret_code | ret_note ----------+----------------------------------------------- 200 | Table public.slavedata state set to 'in-copy' (1 row) select * from 
londiste.get_table_list('branch_set'); table_name | local | merge_state | custom_snapshot | table_attrs | dropped_ddl | copy_role | copy_pos | dest_table ------------------+-------+-------------+-----------------+-------------+-------------+-----------+----------+------------ public.slavedata | t | in-copy | | | | | 0 | public.tmp | f | | | | | | 0 | (2 rows) select * from londiste.global_remove_table('branch_set', 'public.tmp'); ret_code | ret_note ----------+--------------------------- 200 | Table removed: public.tmp (1 row) select * from londiste.local_remove_table('branch_set', 'public.slavedata'); ret_code | ret_note ----------+--------------------------------- 200 | Table removed: public.slavedata (1 row) select * from londiste.local_remove_table('branch_set', 'public.slavedata'); ret_code | ret_note ----------+------------------------------------------------ 400 | Table not registered locally: public.slavedata (1 row) select * from londiste.get_table_list('branch_set'); table_name | local | merge_state | custom_snapshot | table_attrs | dropped_ddl | copy_role | copy_pos | dest_table ------------------+-------+-------------+-----------------+-------------+-------------+-----------+----------+------------ public.slavedata | f | | | | | | 0 | (1 row) select * from londiste.local_show_missing('branch_set'); obj_kind | obj_name ----------+------------------ r | public.slavedata (1 row) skytools-3.2.6/sql/londiste/expected/londiste_provider.out0000644000000000000000000001620212426435645021020 0ustar set client_min_messages = 'warning'; \set VERBOSITY 'terse' -- -- tables -- create table testdata ( id serial primary key, txt text ); create table testdata_nopk ( id serial, txt text ); select current_database(); current_database ------------------ regression (1 row) select * from pgq_node.register_location('aset', 'rnode', 'dbname=db', false); ret_code | ret_note ----------+--------------------- 200 | Location registered (1 row) select * from 
pgq_node.create_node('aset', 'root', 'rnode', 'londiste_root', null::text, null::int8, null::text); ret_code | ret_note ----------+------------------------------------------------------------ 200 | Node "rnode" initialized for queue "aset" with type "root" (1 row) select * from londiste.local_add_table('aset', 'public.testdata_nopk'); ret_code | ret_note ----------+---------------------------------------------------- 400 | Primary key missing on table: public.testdata_nopk (1 row) select * from londiste.local_add_table('aset', 'public.testdata'); ret_code | ret_note ----------+------------------------------ 200 | Table added: public.testdata (1 row) select tgname from pg_trigger where tgrelid = 'public.testdata'::regclass order by 1; tgname ------------------------- _londiste_aset _londiste_aset_truncate (2 rows) insert into testdata (txt) values ('test-data'); select * from londiste.get_table_list('aset'); table_name | local | merge_state | custom_snapshot | table_attrs | dropped_ddl | copy_role | copy_pos | dest_table -----------------+-------+-------------+-----------------+-------------+-------------+-----------+----------+------------ public.testdata | t | ok | | | | | 0 | (1 row) select * from londiste.local_show_missing('aset'); obj_kind | obj_name ----------+----------------------------- S | public.testdata_id_seq S | public.testdata_nopk_id_seq r | public.testdata_nopk (3 rows) select * from londiste.local_remove_table('aset', 'public.testdata'); ret_code | ret_note ----------+-------------------------------- 200 | Table removed: public.testdata (1 row) select * from londiste.local_remove_table('aset', 'public.testdata'); ret_code | ret_note ----------+---------------------------------- 400 | Table not found: public.testdata (1 row) select tgname from pg_trigger where tgrelid = 'public.testdata'::regclass; tgname -------- (0 rows) select * from londiste.get_table_list('aset'); table_name | local | merge_state | custom_snapshot | table_attrs | dropped_ddl | 
copy_role | copy_pos | dest_table ------------+-------+-------------+-----------------+-------------+-------------+-----------+----------+------------ (0 rows) select ev_id, ev_type, ev_data, ev_extra1 from pgq.event_template; ev_id | ev_type | ev_data | ev_extra1 -------+-----------------------+--------------------+----------------- 1 | londiste.add-table | public.testdata | 2 | I:id | id=1&txt=test-data | public.testdata 3 | londiste.remove-table | public.testdata | (3 rows) select * from londiste.local_show_missing('aset'); obj_kind | obj_name ----------+----------------------------- S | public.testdata_id_seq S | public.testdata_nopk_id_seq r | public.testdata r | public.testdata_nopk (4 rows) -- trigtest create table trg_test ( id int4 primary key, txt text ); select * from londiste.local_add_table('aset', 'public.trg_test', array['ev_extra4=''test='' || txt']); ret_code | ret_note ----------+------------------------------ 200 | Table added: public.trg_test (1 row) select * from londiste.local_add_table('aset', 'public.trg_test'); ret_code | ret_note ----------+-------------------------------------- 200 | Table already added: public.trg_test (1 row) select * from londiste.local_add_table('aset', 'public.trg_test', array['ev_extra4=''test='' || txt'], 'handler=foobar'); ret_code | ret_note ----------+---------------------------------------------------------------- 410 | Table public.trg_test already added, but with different args: (1 row) insert into trg_test values (1, 'data'); truncate trg_test; select ev_id, ev_type, ev_data, ev_extra1, ev_extra4 from pgq.event_template where ev_extra1 = 'public.trg_test'; ev_id | ev_type | ev_data | ev_extra1 | ev_extra4 -------+---------+---------------+-----------------+----------- 5 | I:id | id=1&txt=data | public.trg_test | test=data 6 | R | | public.trg_test | (2 rows) select tgname from pg_trigger where tgrelid = 'public.trg_test'::regclass order by 1; tgname ------------------------- _londiste_aset 
_londiste_aset_truncate (2 rows) delete from londiste.table_info where table_name = 'public.trg_test'; select tgname from pg_trigger where tgrelid = 'public.trg_test'::regclass order by 1; tgname -------- (0 rows) -- handler test create table hdlr_test ( id int4 primary key, txt text ); select * from londiste.local_add_table('aset', 'public.hdlr_test'); ret_code | ret_note ----------+------------------------------- 200 | Table added: public.hdlr_test (1 row) insert into hdlr_test values (1, 'data'); select * from londiste.local_change_handler('aset', 'public.hdlr_test', array['ev_extra4=''test='' || txt'], 'handler=foobar'); ret_code | ret_note ----------+--------------------------------------------- 200 | Handler changed for table: public.hdlr_test (1 row) insert into hdlr_test values (2, 'data2'); select * from londiste.local_change_handler('aset', 'public.hdlr_test', '{}'::text[], ''); ret_code | ret_note ----------+--------------------------------------------- 200 | Handler changed for table: public.hdlr_test (1 row) insert into hdlr_test values (3, 'data3'); truncate hdlr_test; select ev_id, ev_type, ev_data, ev_extra1, ev_extra4 from pgq.event_template where ev_extra1 = 'public.hdlr_test'; ev_id | ev_type | ev_data | ev_extra1 | ev_extra4 -------+---------+----------------+------------------+------------ 8 | I:id | id=1&txt=data | public.hdlr_test | 9 | I:id | id=2&txt=data2 | public.hdlr_test | test=data2 10 | I:id | id=3&txt=data3 | public.hdlr_test | 11 | R | | public.hdlr_test | (4 rows) -- test proper trigger creation with add-table specific args select * from londiste.local_add_table('aset', 'public.trg_test', array['ev_extra4=''test='' || txt', 'expect_sync', 'skip']); ret_code | ret_note ----------+------------------------------ 200 | Table added: public.trg_test (1 row) insert into trg_test values (2, 'data2'); ERROR: SKIP does not work in AFTER trigger. 
skytools-3.2.6/sql/londiste/expected/londiste_leaf.out0000644000000000000000000001021512426435645020073 0ustar set client_min_messages = 'warning'; \set VERBOSITY 'terse' select 1 from (select set_config(name, 'escape', false) as ignore from pg_settings where name = 'bytea_output') x where x.ignore = 'foo'; ?column? ---------- (0 rows) -- -- tables -- create table leafdata ( id serial primary key, data text ); select current_database(); current_database ------------------ regression (1 row) select * from pgq_node.register_location('leafq', 'lq_node1', 'dbname=db', false); ret_code | ret_note ----------+--------------------- 200 | Location registered (1 row) select * from pgq_node.register_location('leafq', 'lq_node2', 'dbname=db2', false); ret_code | ret_note ----------+--------------------- 200 | Location registered (1 row) select * from pgq_node.create_node('leafq', 'leaf', 'lq_node2', 'londiste_leaf', 'lq_node1', 100, null::text); ret_code | ret_note ----------+---------------------------------------------------------------- 200 | Node "lq_node2" initialized for queue "leafq" with type "leaf" (1 row) select * from londiste.local_show_missing('leafq'); obj_kind | obj_name ----------+---------- (0 rows) select * from londiste.local_add_table('leafq', 'public.leafdata'); ret_code | ret_note ----------+----------------------------------------------- 404 | Table not available on queue: public.leafdata (1 row) select * from londiste.global_add_table('leafq', 'public.leafdata'); ret_code | ret_note ----------+------------------------------ 200 | Table added: public.leafdata (1 row) select * from londiste.local_add_table('leafq', 'public.leafdata'); ret_code | ret_note ----------+------------------------------ 200 | Table added: public.leafdata (1 row) select * from londiste.global_add_table('leafq', 'public.tmp'); ret_code | ret_note ----------+------------------------- 200 | Table added: public.tmp (1 row) select * from londiste.get_table_list('leafq'); table_name | 
local | merge_state | custom_snapshot | table_attrs | dropped_ddl | copy_role | copy_pos | dest_table -----------------+-------+-------------+-----------------+-------------+-------------+-----------+----------+------------ public.leafdata | t | | | | | | 0 | public.tmp | f | | | | | | 0 | (2 rows) select tgname, tgargs from pg_trigger where tgrelid = 'public.leafdata'::regclass order by 1; tgname | tgargs --------------------------+------------------- _londiste_leafq | leafq\000deny\000 _londiste_leafq_truncate | leafq\000deny\000 (2 rows) insert into leafdata values (1, 'asd'); ERROR: Table 'public.leafdata' to queue 'leafq': change not allowed (I) select * from londiste.global_remove_table('leafq', 'public.tmp'); ret_code | ret_note ----------+--------------------------- 200 | Table removed: public.tmp (1 row) select * from londiste.local_remove_table('leafq', 'public.leafdata'); ret_code | ret_note ----------+-------------------------------- 200 | Table removed: public.leafdata (1 row) select * from londiste.local_remove_table('leafq', 'public.leafdata'); ret_code | ret_note ----------+----------------------------------------------- 400 | Table not registered locally: public.leafdata (1 row) select * from londiste.get_table_list('leafq'); table_name | local | merge_state | custom_snapshot | table_attrs | dropped_ddl | copy_role | copy_pos | dest_table -----------------+-------+-------------+-----------------+-------------+-------------+-----------+----------+------------ public.leafdata | f | | | | | | 0 | (1 row) select * from londiste.local_show_missing('leafq'); obj_kind | obj_name ----------+----------------- r | public.leafdata (1 row) skytools-3.2.6/sql/londiste/expected/londiste_install.out0000644000000000000000000000027712426435645020641 0ustar \set ECHO off upgrade_schema ---------------- 0 (1 row) upgrade_schema ---------------- 0 (1 row) upgrade_schema ---------------- 0 (1 row) 
skytools-3.2.6/sql/londiste/expected/init_noext_1.out0000644000000000000000000000027712426435645017672 0ustar \set ECHO off upgrade_schema ---------------- 0 (1 row) upgrade_schema ---------------- 0 (1 row) upgrade_schema ---------------- 0 (1 row) skytools-3.2.6/sql/londiste/expected/londiste_merge.out0000644000000000000000000004020612426435645020266 0ustar set client_min_messages = 'warning'; \set VERBOSITY 'terse' -- -- tables -- create table tblmerge ( id int4 primary key, data text ); select * from pgq_node.register_location('combined_set', 'croot', 'dbname=db', false); ret_code | ret_note ----------+--------------------- 200 | Location registered (1 row) select * from pgq_node.create_node('combined_set', 'root', 'croot', 'londiste_croot', null, null, null); ret_code | ret_note ----------+-------------------------------------------------------------------- 200 | Node "croot" initialized for queue "combined_set" with type "root" (1 row) select * from pgq_node.register_location('part1_set', 'p1root', 'dbname=db', false); ret_code | ret_note ----------+--------------------- 200 | Location registered (1 row) select * from pgq_node.register_location('part1_set', 'p1merge', 'dbname=db2', false); ret_code | ret_note ----------+--------------------- 200 | Location registered (1 row) select * from pgq_node.create_node('part1_set', 'leaf', 'p1merge', 'londiste_p1merge', 'p1root', 100, 'combined_set'); ret_code | ret_note ----------+------------------------------------------------------------------- 200 | Node "p1merge" initialized for queue "part1_set" with type "leaf" (1 row) select * from pgq_node.register_location('part2_set', 'p2root', 'dbname=db', false); ret_code | ret_note ----------+--------------------- 200 | Location registered (1 row) select * from pgq_node.register_location('part2_set', 'p2merge', 'dbname=db2', false); ret_code | ret_note ----------+--------------------- 200 | Location registered (1 row) select * from pgq_node.create_node('part2_set', 
'leaf', 'p2merge', 'londiste_p2merge', 'p2root', 100, 'combined_set'); ret_code | ret_note ----------+------------------------------------------------------------------- 200 | Node "p2merge" initialized for queue "part2_set" with type "leaf" (1 row) select * from pgq_node.register_location('part3_set', 'p3root', 'dbname=db', false); ret_code | ret_note ----------+--------------------- 200 | Location registered (1 row) select * from pgq_node.register_location('part3_set', 'p3merge', 'dbname=db3', false); ret_code | ret_note ----------+--------------------- 200 | Location registered (1 row) select * from pgq_node.create_node('part3_set', 'leaf', 'p3merge', 'londiste_p3merge', 'p3root', 100, 'combined_set'); ret_code | ret_note ----------+------------------------------------------------------------------- 200 | Node "p3merge" initialized for queue "part3_set" with type "leaf" (1 row) select * from londiste.local_add_table('combined_set', 'tblmerge'); ret_code | ret_note ----------+------------------------------ 200 | Table added: public.tblmerge (1 row) select * from londiste.global_add_table('part1_set', 'tblmerge'); ret_code | ret_note ----------+----------------------- 200 | Table added: tblmerge (1 row) select * from londiste.global_add_table('part2_set', 'tblmerge'); ret_code | ret_note ----------+----------------------- 200 | Table added: tblmerge (1 row) select * from londiste.local_add_table('part1_set', 'tblmerge', array['merge_all']); ret_code | ret_note ----------+------------------------------ 200 | Table added: public.tblmerge (1 row) select * from londiste.get_table_list('part1_set'); table_name | local | merge_state | custom_snapshot | table_attrs | dropped_ddl | copy_role | copy_pos | dest_table -----------------+-------+-------------+-----------------+-------------+-------------+-----------+----------+------------ public.tblmerge | t | | | | | | 0 | (1 row) select * from londiste.get_table_list('part2_set'); table_name | local | merge_state | 
custom_snapshot | table_attrs | dropped_ddl | copy_role | copy_pos | dest_table -----------------+-------+-------------+-----------------+-------------+-------------+-----------+----------+------------ public.tblmerge | t | | | | | | 0 | (1 row) select * from londiste.get_table_list('combined_set'); table_name | local | merge_state | custom_snapshot | table_attrs | dropped_ddl | copy_role | copy_pos | dest_table -----------------+-------+-------------+-----------------+-------------+-------------+-----------+----------+------------ public.tblmerge | t | ok | | | | | 0 | (1 row) select * from londiste.local_set_table_state('part1_set', 'public.tblmerge', null, 'in-copy'); ret_code | ret_note ----------+---------------------------------------------- 200 | Table public.tblmerge state set to 'in-copy' (1 row) select * from londiste.local_set_table_state('part2_set', 'public.tblmerge', null, 'in-copy'); ret_code | ret_note ----------+---------------------------------------------- 200 | Table public.tblmerge state set to 'in-copy' (1 row) select * from londiste.get_table_list('part1_set'); table_name | local | merge_state | custom_snapshot | table_attrs | dropped_ddl | copy_role | copy_pos | dest_table -----------------+-------+-------------+-----------------+-------------+-------------+-----------+----------+------------ public.tblmerge | t | in-copy | | | | lead | 0 | (1 row) select * from londiste.get_table_list('part2_set'); table_name | local | merge_state | custom_snapshot | table_attrs | dropped_ddl | copy_role | copy_pos | dest_table -----------------+-------+-------------+-----------------+-------------+-------------+-----------+----------+------------ public.tblmerge | t | in-copy | | | | wait-copy | 1 | (1 row) select * from londiste.local_set_table_struct('part1_set', 'public.tblmerge', 'create index;'); ret_code | ret_note ----------+--------------------- 200 | Table struct stored (1 row) select * from londiste.get_table_list('part1_set'); table_name | local 
| merge_state | custom_snapshot | table_attrs | dropped_ddl | copy_role | copy_pos | dest_table -----------------+-------+-------------+-----------------+-------------+---------------+-----------+----------+------------ public.tblmerge | t | in-copy | | | create index; | lead | 0 | (1 row) select * from londiste.get_table_list('part2_set'); table_name | local | merge_state | custom_snapshot | table_attrs | dropped_ddl | copy_role | copy_pos | dest_table -----------------+-------+-------------+-----------------+-------------+-------------+-------------+----------+------------ public.tblmerge | t | in-copy | | | | wait-replay | 1 | (1 row) select * from londiste.local_set_table_state('part2_set', 'public.tblmerge', null, 'catching-up'); ret_code | ret_note ----------+-------------------------------------------------- 200 | Table public.tblmerge state set to 'catching-up' (1 row) select * from londiste.get_table_list('part1_set'); table_name | local | merge_state | custom_snapshot | table_attrs | dropped_ddl | copy_role | copy_pos | dest_table -----------------+-------+-------------+-----------------+-------------+---------------+-----------+----------+------------ public.tblmerge | t | in-copy | | | create index; | lead | 0 | (1 row) select * from londiste.get_table_list('part2_set'); table_name | local | merge_state | custom_snapshot | table_attrs | dropped_ddl | copy_role | copy_pos | dest_table -----------------+-------+-------------+-----------------+-------------+-------------+-------------+----------+------------ public.tblmerge | t | catching-up | | | | wait-replay | 0 | (1 row) select * from londiste.local_set_table_state('part1_set', 'public.tblmerge', null, 'catching-up'); ret_code | ret_note ----------+-------------------------------------------------- 200 | Table public.tblmerge state set to 'catching-up' (1 row) select * from londiste.get_table_list('part1_set'); table_name | local | merge_state | custom_snapshot | table_attrs | dropped_ddl | copy_role | 
copy_pos | dest_table -----------------+-------+-------------+-----------------+-------------+---------------+-----------+----------+------------ public.tblmerge | t | catching-up | | | create index; | | 0 | (1 row) select * from londiste.get_table_list('part2_set'); table_name | local | merge_state | custom_snapshot | table_attrs | dropped_ddl | copy_role | copy_pos | dest_table -----------------+-------+-------------+-----------------+-------------+-------------+-------------+----------+------------ public.tblmerge | t | catching-up | | | | wait-replay | 0 | (1 row) select * from londiste.local_set_table_struct('part1_set', 'public.tblmerge', null); ret_code | ret_note ----------+--------------------- 200 | Table struct stored (1 row) select * from londiste.get_table_list('part1_set'); table_name | local | merge_state | custom_snapshot | table_attrs | dropped_ddl | copy_role | copy_pos | dest_table -----------------+-------+-------------+-----------------+-------------+-------------+-----------+----------+------------ public.tblmerge | t | catching-up | | | | | 0 | (1 row) select * from londiste.get_table_list('part2_set'); table_name | local | merge_state | custom_snapshot | table_attrs | dropped_ddl | copy_role | copy_pos | dest_table -----------------+-------+-------------+-----------------+-------------+-------------+-----------+----------+------------ public.tblmerge | t | catching-up | | | | | 0 | (1 row) -- test automatic registration on combined-root select * from londiste.global_add_table('part1_set', 'tblauto'); ret_code | ret_note ----------+---------------------- 200 | Table added: tblauto (1 row) select * from londiste.global_add_table('part2_set', 'tblauto'); ret_code | ret_note ----------+---------------------- 200 | Table added: tblauto (1 row) select * from londiste.local_add_table('part1_set', 'tblauto', array['merge_all', 'virtual_table'], 'handler=vtable'); ret_code | ret_note ----------+---------------------------------------------- 200 | 
Table added with no triggers: public.tblauto (1 row) select * from londiste.get_table_list('part2_set'); table_name | local | merge_state | custom_snapshot | table_attrs | dropped_ddl | copy_role | copy_pos | dest_table -----------------+-------+-------------+-----------------+----------------+-------------+-----------+----------+------------ public.tblmerge | t | catching-up | | | | | 0 | public.tblauto | t | ok | | handler=vtable | | | 0 | (2 rows) select * from londiste.get_table_list('combined_set'); table_name | local | merge_state | custom_snapshot | table_attrs | dropped_ddl | copy_role | copy_pos | dest_table -----------------+-------+-------------+-----------------+----------------+-------------+-----------+----------+------------ public.tblmerge | t | ok | | | | | 0 | public.tblauto | t | ok | | handler=vtable | | | 0 | (2 rows) -- -- Test all combinations on 3-node merge -- select * from londiste.global_add_table('part3_set', 'tblmerge'); ret_code | ret_note ----------+----------------------- 200 | Table added: tblmerge (1 row) \set ECHO off select * from testmatrix(); p1s | p2s | p3s | p1r | p2r | p3r --------------+--------------+--------------+-------------+-------------+------------- !catching-up | catching-up | catching-up | NULL | wait-replay | wait-replay !catching-up | catching-up | in-copy | wait-replay | wait-replay | wait-replay !catching-up | in-copy | catching-up | wait-replay | wait-replay | wait-replay !catching-up | in-copy | in-copy | wait-replay | wait-replay | wait-replay !in-copy | catching-up | catching-up | lead | wait-replay | wait-replay !in-copy | catching-up | in-copy | lead | wait-replay | wait-replay !in-copy | in-copy | catching-up | lead | wait-replay | wait-replay !in-copy | in-copy | in-copy | lead | wait-replay | wait-replay catching-up | !catching-up | catching-up | wait-replay | NULL | wait-replay catching-up | !catching-up | in-copy | wait-replay | wait-replay | wait-replay catching-up | !in-copy | catching-up | 
wait-replay | lead | wait-replay catching-up | !in-copy | in-copy | wait-replay | lead | wait-replay catching-up | catching-up | !catching-up | wait-replay | wait-replay | NULL catching-up | catching-up | !in-copy | wait-replay | wait-replay | lead catching-up | catching-up | catching-up | NULL | NULL | NULL catching-up | catching-up | in-copy | NULL | NULL | wait-replay catching-up | in-copy | !catching-up | wait-replay | wait-replay | wait-replay catching-up | in-copy | !in-copy | wait-replay | wait-replay | lead catching-up | in-copy | catching-up | NULL | wait-replay | NULL catching-up | in-copy | in-copy | NULL | wait-replay | wait-replay in-copy | !catching-up | catching-up | wait-replay | wait-replay | wait-replay in-copy | !catching-up | in-copy | wait-replay | wait-replay | wait-replay in-copy | !in-copy | catching-up | wait-replay | lead | wait-replay in-copy | !in-copy | in-copy | wait-replay | lead | wait-replay in-copy | catching-up | !catching-up | wait-replay | wait-replay | wait-replay in-copy | catching-up | !in-copy | wait-replay | wait-replay | lead in-copy | catching-up | catching-up | wait-replay | NULL | NULL in-copy | catching-up | in-copy | wait-replay | NULL | wait-replay in-copy | in-copy | !catching-up | wait-replay | wait-replay | wait-replay in-copy | in-copy | !in-copy | wait-replay | wait-replay | lead in-copy | in-copy | catching-up | wait-replay | wait-replay | NULL in-copy | in-copy | in-copy | lead | wait-copy | wait-copy (32 rows) -- test dropped ddl restore create table ddlrestore ( id int4, data1 text, data2 text ); select count(*) from pg_indexes where schemaname='public' and tablename='ddlrestore'; count ------- 0 (1 row) insert into londiste.table_info (queue_name, table_name, local, merge_state, dropped_ddl) values ('part1_set', 'public.ddlrestore', true, 'in-copy', ' ALTER TABLE ddlrestore ADD CONSTRAINT cli_pkey PRIMARY KEY (id); CREATE INDEX idx_data1 ON ddlrestore USING btree (data1); CREATE INDEX idx_data2 ON 
ddlrestore USING btree (data2); '); select * from londiste.local_remove_table('part1_set', 'public.ddlrestore'); ret_code | ret_note ----------+---------------------------------- 200 | Table removed: public.ddlrestore (1 row) select count(*) from pg_indexes where schemaname='public' and tablename='ddlrestore'; count ------- 3 (1 row) skytools-3.2.6/sql/londiste/expected/londiste_seqs.out0000644000000000000000000001365512426435645020152 0ustar set client_min_messages = 'warning'; \set VERBOSITY 'terse' -- -- sequences -- create sequence masterseq; create sequence slaveseq; select * from pgq_node.register_location('seqroot', 'rnode', 'dbname=db', false); ret_code | ret_note ----------+--------------------- 200 | Location registered (1 row) select * from pgq_node.create_node('seqroot', 'root', 'rnode', 'londiste_root', null::text, null::int8, null::text); ret_code | ret_note ----------+--------------------------------------------------------------- 200 | Node "rnode" initialized for queue "seqroot" with type "root" (1 row) select * from londiste.local_add_seq('seqroot', 'masterseq'); ret_code | ret_note ----------+---------------------------------- 200 | Sequence added: public.masterseq (1 row) select * from londiste.local_add_seq('seqroot', 'masterseq'); ret_code | ret_note ----------+------------------------------------------ 201 | Sequence already added: public.masterseq (1 row) select * from londiste.root_check_seqs('seqroot'); ret_code | ret_note ----------+------------------- 100 | Sequences updated (1 row) select * from londiste.local_remove_seq('seqroot', 'masterseq'); ret_code | ret_note ----------+------------------------------------ 200 | Sequence removed: public.masterseq (1 row) select * from londiste.local_remove_seq('seqroot', 'masterseq'); ret_code | ret_note ----------+-------------------------------------- 400 | Sequence not found: public.masterseq (1 row) select * from londiste.get_seq_list('seqroot'); seq_name | last_value | local 
----------+------------+------- (0 rows) select ev_id, ev_type, ev_data, ev_extra1 from pgq.event_template where ev_type like '%seq%'; ev_id | ev_type | ev_data | ev_extra1 -------+---------------------+------------------+------------------ 1 | londiste.update-seq | 30001 | public.masterseq 2 | londiste.remove-seq | public.masterseq | (2 rows) -- subscriber select * from pgq_node.register_location('seqbranch', 'subnode', 'dbname=db', false); ret_code | ret_note ----------+--------------------- 200 | Location registered (1 row) select * from pgq_node.register_location('seqbranch', 'rootnode', 'dbname=db', false); ret_code | ret_note ----------+--------------------- 200 | Location registered (1 row) select * from pgq_node.create_node('seqbranch', 'branch', 'subnode', 'londiste_branch', 'rootnode', 1, null::text); ret_code | ret_note ----------+--------------------------------------------------------------------- 200 | Node "subnode" initialized for queue "seqbranch" with type "branch" (1 row) select * from londiste.local_add_seq('seqbranch', 'masterseq'); ret_code | ret_note ----------+------------------------------------ 404 | Unknown sequence: public.masterseq (1 row) select * from londiste.global_update_seq('seqbranch', 'masterseq', 5); ret_code | ret_note ----------+------------------ 200 | Sequence updated (1 row) select * from londiste.local_add_seq('seqbranch', 'masterseq'); ret_code | ret_note ----------+---------------------------------- 200 | Sequence added: public.masterseq (1 row) select * from londiste.root_check_seqs('seqbranch'); ret_code | ret_note ----------+----------------- 402 | Not a root node (1 row) select * from londiste.get_seq_list('seqbranch'); seq_name | last_value | local ------------------+------------+------- public.masterseq | 5 | t (1 row) select * from londiste.local_remove_seq('seqbranch', 'masterseq'); ret_code | ret_note ----------+------------------------------------ 200 | Sequence removed: public.masterseq (1 row) select * from 
londiste.local_remove_seq('seqbranch', 'masterseq'); ret_code | ret_note ----------+-------------------------------------- 404 | Sequence not found: public.masterseq (1 row) -- seq auto-removal create table seqtable ( id1 serial primary key, id2 bigserial not null ); select * from londiste.local_add_table('seqroot', 'seqtable'); ret_code | ret_note ----------+------------------------------ 200 | Table added: public.seqtable (1 row) select * from londiste.local_add_seq('seqroot', 'seqtable_id1_seq'); ret_code | ret_note ----------+----------------------------------------- 200 | Sequence added: public.seqtable_id1_seq (1 row) select * from londiste.local_add_seq('seqroot', 'seqtable_id2_seq'); ret_code | ret_note ----------+----------------------------------------- 200 | Sequence added: public.seqtable_id2_seq (1 row) select * from londiste.get_table_list('seqroot'); table_name | local | merge_state | custom_snapshot | table_attrs | dropped_ddl | copy_role | copy_pos | dest_table -----------------+-------+-------------+-----------------+-------------+-------------+-----------+----------+------------ public.seqtable | t | ok | | | | | 0 | (1 row) select * from londiste.get_seq_list('seqroot'); seq_name | last_value | local -------------------------+------------+------- public.seqtable_id1_seq | 30001 | t public.seqtable_id2_seq | 30001 | t (2 rows) select * from londiste.local_remove_table('seqroot', 'seqtable'); ret_code | ret_note ----------+-------------------------------- 200 | Table removed: public.seqtable (1 row) select * from londiste.get_seq_list('seqroot'); seq_name | last_value | local ----------+------------+------- (0 rows) skytools-3.2.6/sql/londiste/expected/londiste_create_part.out0000644000000000000000000000676412426435645021473 0ustar \set ECHO none drop role if exists londiste_test_part1; drop role if exists londiste_test_part2; create group londiste_test_part1; create group londiste_test_part2; create table events ( id int4 primary key, txt text 
not null, ctime timestamptz not null default now(), someval int4 check (someval > 0) ); create index ctime_idx on events (ctime); create rule ignore_dups AS on insert to events where (exists (select 1 from events where (events.id = new.id))) do instead nothing; create or replace function "NullTrigger"() returns trigger as $$ begin return null; end; $$ language plpgsql; create trigger "Fooza" after delete on events for each row execute procedure "NullTrigger"(); alter table events enable always trigger "Fooza"; grant select,delete on events to londiste_test_part1; grant select,update,delete on events to londiste_test_part2 with grant option; grant select,insert on events to public; select londiste.create_partition('events', 'events_2011_01', 'id', 'ctime', '2011-01-01', 'month'); create_partition ------------------ 1 (1 row) select londiste.create_partition('events', 'events_2011_01', 'id', 'ctime', '2011-01-01'::timestamptz, 'month'); create_partition ------------------ 0 (1 row) select londiste.create_partition('events', 'events_2011_01', 'id', 'ctime', '2011-01-01'::timestamp, 'month'); create_partition ------------------ 0 (1 row) select count(*) from pg_indexes where schemaname='public' and tablename = 'events_2011_01'; count ------- 2 (1 row) select count(*) from pg_constraint where conrelid = 'public.events_2011_01'::regclass; count ------- 3 (1 row) select count(*) from pg_rules where schemaname = 'public' and tablename = 'events_2011_01'; count ------- 1 (1 row) select trigger_name, event_manipulation, action_statement from information_schema.triggers where event_object_schema = 'public' and event_object_table = 'events_2011_01'; trigger_name | event_manipulation | action_statement --------------+--------------------+----------------------------------- Fooza | DELETE | EXECUTE PROCEDURE "NullTrigger"() (1 row) select tgenabled, pg_get_triggerdef(oid) from pg_trigger where tgrelid = 'events_2011_01'::regclass::oid; tgenabled | pg_get_triggerdef 
-----------+------------------------------------------------------------------------------------------------------ A | CREATE TRIGGER "Fooza" AFTER DELETE ON events_2011_01 FOR EACH ROW EXECUTE PROCEDURE "NullTrigger"() (1 row) -- test weird quoting create table "Bad "" table '.' name!" ( id int4 primary key, txt text not null, ctime timestamptz not null default now(), someval int4 check (someval > 0) ); create rule "Ignore Dups" AS on insert to "Bad "" table '.' name!" where (exists (select 1 from "Bad "" table '.' name!" where ("Bad "" table '.' name!".id = new.id))) do instead nothing; alter table "Bad "" table '.' name!" ENABLE ALWAYS RULE "Ignore Dups"; select londiste.create_partition('public.Bad " table ''.'' name!', 'public.Bad " table ''.'' part!', 'id', 'ctime', '2011-01-01', 'month'); create_partition ------------------ 1 (1 row) select count(*) from pg_rules where schemaname = 'public' and tablename ilike 'bad%'; count ------- 2 (1 row) -- \d events_2011_01 -- \dp events -- \dp events_2011_01 skytools-3.2.6/sql/londiste/expected/londiste_fkeys.out0000644000000000000000000002512412426435645020312 0ustar set log_error_verbosity = 'terse'; set client_min_messages = 'warning'; create table ref_1 ( id int4 primary key, val text ); create table ref_2 ( id int4 primary key, ref int4 not null references ref_1, val text ); create table ref_3 ( id int4 primary key, ref2 int4 not null references ref_2, val text ); select * from londiste.global_add_table('branch_set', 'public.ref_1'); ret_code | ret_note ----------+--------------------------- 200 | Table added: public.ref_1 (1 row) select * from londiste.global_add_table('branch_set', 'public.ref_2'); ret_code | ret_note ----------+--------------------------- 200 | Table added: public.ref_2 (1 row) select * from londiste.global_add_table('branch_set', 'public.ref_3'); ret_code | ret_note ----------+--------------------------- 200 | Table added: public.ref_3 (1 row) select * from 
londiste.local_add_table('branch_set', 'public.ref_1'); ret_code | ret_note ----------+--------------------------- 200 | Table added: public.ref_1 (1 row) select * from londiste.local_add_table('branch_set', 'public.ref_2'); ret_code | ret_note ----------+--------------------------- 200 | Table added: public.ref_2 (1 row) select * from londiste.local_add_table('branch_set', 'public.ref_3'); ret_code | ret_note ----------+--------------------------- 200 | Table added: public.ref_3 (1 row) select * from londiste.find_table_fkeys('public.ref_1'); from_table | to_table | fkey_name | fkey_def --------------+--------------+----------------+---------------------------------------------------------------------------------------------------- public.ref_2 | public.ref_1 | ref_2_ref_fkey | alter table only public.ref_2 add constraint ref_2_ref_fkey FOREIGN KEY (ref) REFERENCES ref_1(id) (1 row) select * from londiste.find_table_fkeys('public.ref_2'); from_table | to_table | fkey_name | fkey_def --------------+--------------+-----------------+------------------------------------------------------------------------------------------------------ public.ref_2 | public.ref_1 | ref_2_ref_fkey | alter table only public.ref_2 add constraint ref_2_ref_fkey FOREIGN KEY (ref) REFERENCES ref_1(id) public.ref_3 | public.ref_2 | ref_3_ref2_fkey | alter table only public.ref_3 add constraint ref_3_ref2_fkey FOREIGN KEY (ref2) REFERENCES ref_2(id) (2 rows) select * from londiste.find_table_fkeys('public.ref_3'); from_table | to_table | fkey_name | fkey_def --------------+--------------+-----------------+------------------------------------------------------------------------------------------------------ public.ref_3 | public.ref_2 | ref_3_ref2_fkey | alter table only public.ref_3 add constraint ref_3_ref2_fkey FOREIGN KEY (ref2) REFERENCES ref_2(id) (1 row) select * from londiste.get_table_pending_fkeys('public.ref_2'); from_table | to_table | fkey_name | fkey_def 
------------+----------+-----------+---------- (0 rows) select * from londiste.get_valid_pending_fkeys('branch_set'); from_table | to_table | fkey_name | fkey_def ------------+----------+-----------+---------- (0 rows) -- drop fkeys select * from londiste.drop_table_fkey('public.ref_2', 'ref_2_ref_fkey'); drop_table_fkey ----------------- 1 (1 row) select * from londiste.find_table_fkeys('public.ref_1'); from_table | to_table | fkey_name | fkey_def ------------+----------+-----------+---------- (0 rows) select * from londiste.find_table_fkeys('public.ref_2'); from_table | to_table | fkey_name | fkey_def --------------+--------------+-----------------+------------------------------------------------------------------------------------------------------ public.ref_3 | public.ref_2 | ref_3_ref2_fkey | alter table only public.ref_3 add constraint ref_3_ref2_fkey FOREIGN KEY (ref2) REFERENCES ref_2(id) (1 row) select * from londiste.find_table_fkeys('public.ref_3'); from_table | to_table | fkey_name | fkey_def --------------+--------------+-----------------+------------------------------------------------------------------------------------------------------ public.ref_3 | public.ref_2 | ref_3_ref2_fkey | alter table only public.ref_3 add constraint ref_3_ref2_fkey FOREIGN KEY (ref2) REFERENCES ref_2(id) (1 row) select * from londiste.drop_table_fkey('public.ref_3', 'ref_3_ref2_fkey'); drop_table_fkey ----------------- 1 (1 row) -- check if dropped select * from londiste.find_table_fkeys('public.ref_1'); from_table | to_table | fkey_name | fkey_def ------------+----------+-----------+---------- (0 rows) select * from londiste.find_table_fkeys('public.ref_2'); from_table | to_table | fkey_name | fkey_def ------------+----------+-----------+---------- (0 rows) select * from londiste.find_table_fkeys('public.ref_3'); from_table | to_table | fkey_name | fkey_def ------------+----------+-----------+---------- (0 rows) -- look state select * from 
londiste.get_table_pending_fkeys('public.ref_2'); from_table | to_table | fkey_name | fkey_def --------------+--------------+-----------------+------------------------------------------------------------------------------------------------------ public.ref_2 | public.ref_1 | ref_2_ref_fkey | alter table only public.ref_2 add constraint ref_2_ref_fkey FOREIGN KEY (ref) REFERENCES ref_1(id) public.ref_3 | public.ref_2 | ref_3_ref2_fkey | alter table only public.ref_3 add constraint ref_3_ref2_fkey FOREIGN KEY (ref2) REFERENCES ref_2(id) (2 rows) select * from londiste.get_valid_pending_fkeys('branch_set'); from_table | to_table | fkey_name | fkey_def ------------+----------+-----------+---------- (0 rows) -- toggle sync select * from londiste.local_set_table_state('branch_set', 'public.ref_1', null, 'ok'); ret_code | ret_note ----------+-------------------------------------- 200 | Table public.ref_1 state set to 'ok' (1 row) select * from londiste.get_valid_pending_fkeys('branch_set'); from_table | to_table | fkey_name | fkey_def ------------+----------+-----------+---------- (0 rows) select * from londiste.local_set_table_state('branch_set', 'public.ref_2', null, 'ok'); ret_code | ret_note ----------+-------------------------------------- 200 | Table public.ref_2 state set to 'ok' (1 row) select * from londiste.get_valid_pending_fkeys('branch_set'); from_table | to_table | fkey_name | fkey_def --------------+--------------+----------------+---------------------------------------------------------------------------------------------------- public.ref_2 | public.ref_1 | ref_2_ref_fkey | alter table only public.ref_2 add constraint ref_2_ref_fkey FOREIGN KEY (ref) REFERENCES ref_1(id) (1 row) select * from londiste.local_set_table_state('branch_set', 'public.ref_3', null, 'ok'); ret_code | ret_note ----------+-------------------------------------- 200 | Table public.ref_3 state set to 'ok' (1 row) select * from londiste.get_valid_pending_fkeys('branch_set'); from_table 
| to_table | fkey_name | fkey_def --------------+--------------+-----------------+------------------------------------------------------------------------------------------------------ public.ref_2 | public.ref_1 | ref_2_ref_fkey | alter table only public.ref_2 add constraint ref_2_ref_fkey FOREIGN KEY (ref) REFERENCES ref_1(id) public.ref_3 | public.ref_2 | ref_3_ref2_fkey | alter table only public.ref_3 add constraint ref_3_ref2_fkey FOREIGN KEY (ref2) REFERENCES ref_2(id) (2 rows) -- restore select * from londiste.restore_table_fkey('public.ref_2', 'ref_2_ref_fkey'); restore_table_fkey -------------------- 1 (1 row) select * from londiste.restore_table_fkey('public.ref_3', 'ref_3_ref2_fkey'); restore_table_fkey -------------------- 1 (1 row) -- look state select * from londiste.get_table_pending_fkeys('public.ref_2'); from_table | to_table | fkey_name | fkey_def ------------+----------+-----------+---------- (0 rows) select * from londiste.get_valid_pending_fkeys('branch_set'); from_table | to_table | fkey_name | fkey_def ------------+----------+-----------+---------- (0 rows) select * from londiste.find_table_fkeys('public.ref_1'); from_table | to_table | fkey_name | fkey_def --------------+--------------+----------------+---------------------------------------------------------------------------------------------------- public.ref_2 | public.ref_1 | ref_2_ref_fkey | alter table only public.ref_2 add constraint ref_2_ref_fkey FOREIGN KEY (ref) REFERENCES ref_1(id) (1 row) select * from londiste.find_table_fkeys('public.ref_2'); from_table | to_table | fkey_name | fkey_def --------------+--------------+-----------------+------------------------------------------------------------------------------------------------------ public.ref_2 | public.ref_1 | ref_2_ref_fkey | alter table only public.ref_2 add constraint ref_2_ref_fkey FOREIGN KEY (ref) REFERENCES ref_1(id) public.ref_3 | public.ref_2 | ref_3_ref2_fkey | alter table only public.ref_3 add constraint 
ref_3_ref2_fkey FOREIGN KEY (ref2) REFERENCES ref_2(id) (2 rows) select * from londiste.find_table_fkeys('public.ref_3'); from_table | to_table | fkey_name | fkey_def --------------+--------------+-----------------+------------------------------------------------------------------------------------------------------ public.ref_3 | public.ref_2 | ref_3_ref2_fkey | alter table only public.ref_3 add constraint ref_3_ref2_fkey FOREIGN KEY (ref2) REFERENCES ref_2(id) (1 row) skytools-3.2.6/sql/londiste/expected/londiste_leaf_1.out0000644000000000000000000001010312426435645020307 0ustar set client_min_messages = 'warning'; \set VERBOSITY 'terse' select 1 from (select set_config(name, 'escape', false) as ignore from pg_settings where name = 'bytea_output') x where x.ignore = 'foo'; ?column? ---------- (0 rows) -- -- tables -- create table leafdata ( id serial primary key, data text ); select current_database(); current_database ------------------ regression (1 row) select * from pgq_node.register_location('leafq', 'lq_node1', 'dbname=db', false); ret_code | ret_note ----------+--------------------- 200 | Location registered (1 row) select * from pgq_node.register_location('leafq', 'lq_node2', 'dbname=db2', false); ret_code | ret_note ----------+--------------------- 200 | Location registered (1 row) select * from pgq_node.create_node('leafq', 'leaf', 'lq_node2', 'londiste_leaf', 'lq_node1', 100, null::text); ret_code | ret_note ----------+---------------------------------------------------------------- 200 | Node "lq_node2" initialized for queue "leafq" with type "leaf" (1 row) select * from londiste.local_show_missing('leafq'); obj_kind | obj_name ----------+---------- (0 rows) select * from londiste.local_add_table('leafq', 'public.leafdata'); ret_code | ret_note ----------+----------------------------------------------- 404 | Table not available on queue: public.leafdata (1 row) select * from londiste.global_add_table('leafq', 'public.leafdata'); ret_code | ret_note 
----------+------------------------------ 200 | Table added: public.leafdata (1 row) select * from londiste.local_add_table('leafq', 'public.leafdata'); ret_code | ret_note ----------+------------------------------ 200 | Table added: public.leafdata (1 row) select * from londiste.global_add_table('leafq', 'public.tmp'); ret_code | ret_note ----------+------------------------- 200 | Table added: public.tmp (1 row) select * from londiste.get_table_list('leafq'); table_name | local | merge_state | custom_snapshot | table_attrs | dropped_ddl | copy_role | copy_pos | dest_table -----------------+-------+-------------+-----------------+-------------+-------------+-----------+----------+------------ public.leafdata | t | | | | | | 0 | public.tmp | f | | | | | | 0 | (2 rows) select tgname, tgargs from pg_trigger where tgrelid = 'public.leafdata'::regclass order by 1; tgname | tgargs -----------------+------------------- _londiste_leafq | leafq\000deny\000 (1 row) insert into leafdata values (1, 'asd'); ERROR: Table 'public.leafdata' to queue 'leafq': change not allowed (I) select * from londiste.global_remove_table('leafq', 'public.tmp'); ret_code | ret_note ----------+--------------------------- 200 | Table removed: public.tmp (1 row) select * from londiste.local_remove_table('leafq', 'public.leafdata'); ret_code | ret_note ----------+-------------------------------- 200 | Table removed: public.leafdata (1 row) select * from londiste.local_remove_table('leafq', 'public.leafdata'); ret_code | ret_note ----------+----------------------------------------------- 400 | Table not registered locally: public.leafdata (1 row) select * from londiste.get_table_list('leafq'); table_name | local | merge_state | custom_snapshot | table_attrs | dropped_ddl | copy_role | copy_pos | dest_table -----------------+-------+-------------+-----------------+-------------+-------------+-----------+----------+------------ public.leafdata | f | | | | | | 0 | (1 row) select * from 
londiste.local_show_missing('leafq'); obj_kind | obj_name ----------+----------------- r | public.leafdata (1 row) skytools-3.2.6/sql/londiste/expected/init_noext.out0000644000000000000000000000027712426435645017452 0ustar \set ECHO off upgrade_schema ---------------- 0 (1 row) upgrade_schema ---------------- 0 (1 row) upgrade_schema ---------------- 0 (1 row) skytools-3.2.6/sql/londiste/expected/londiste_execute.out0000644000000000000000000000466212426435645020637 0ustar set log_error_verbosity = 'terse'; select * from londiste.execute_start('branch_set', 'DDL-A.sql', 'drop all', false); ret_code | ret_note ----------+---------------------- 200 | Executing: DDL-A.sql (1 row) select * from londiste.execute_start('branch_set', 'DDL-A.sql', 'drop all', false); ret_code | ret_note ----------+------------------------------------------------ 201 | EXECUTE: "DDL-A.sql" already applied, skipping (1 row) select * from londiste.execute_finish('branch_set', 'DDL-A.sql'); ret_code | ret_note ----------+----------------------------- 200 | Execute finished: DDL-A.sql (1 row) select * from londiste.execute_finish('branch_set', 'DDL-A.sql'); ret_code | ret_note ----------+----------------------------- 200 | Execute finished: DDL-A.sql (1 row) select * from londiste.execute_finish('branch_set', 'DDL-XXX.sql'); ret_code | ret_note ----------+------------------------------------------- 404 | execute_file called without execute_start (1 row) select * from londiste.execute_start('branch_set', 'DDL-B.sql', 'drop all', true); ret_code | ret_note ----------+----------------------------------- 401 | Node is not root node: branch_set (1 row) select * from londiste.execute_start('branch_set', 'DDL-B.sql', 'drop all', true); ret_code | ret_note ----------+----------------------------------- 401 | Node is not root node: branch_set (1 row) select * from londiste.execute_start('aset', 'DDL-root.sql', 'drop all', true); ret_code | ret_note ----------+------------------------- 200 | Executing: 
DDL-root.sql (1 row) select * from londiste.execute_start('aset', 'DDL-root.sql', 'drop all', true); ret_code | ret_note ----------+--------------------------------------------------- 201 | EXECUTE: "DDL-root.sql" already applied, skipping (1 row) select * from londiste.execute_finish('aset', 'DDL-root.sql'); ret_code | ret_note ----------+-------------------------------- 200 | Execute finished: DDL-root.sql (1 row) select * from londiste.execute_finish('aset', 'DDL-root.sql'); ret_code | ret_note ----------+-------------------------------- 200 | Execute finished: DDL-root.sql (1 row) skytools-3.2.6/sql/londiste/expected/init_ext.out0000644000000000000000000000071212426435645017107 0ustar \set ECHO off upgrade_schema ---------------- 0 (1 row) create extension londiste from 'unpackaged'; select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'londiste'; dumpable ---------- 4 (1 row) drop extension londiste; create extension londiste; select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'londiste'; dumpable ---------- 4 (1 row) skytools-3.2.6/sql/londiste/expected/londiste_provider_1.out0000644000000000000000000001567312426435645021253 0ustar set client_min_messages = 'warning'; \set VERBOSITY 'terse' -- -- tables -- create table testdata ( id serial primary key, txt text ); create table testdata_nopk ( id serial, txt text ); select current_database(); current_database ------------------ regression (1 row) select * from pgq_node.register_location('aset', 'rnode', 'dbname=db', false); ret_code | ret_note ----------+--------------------- 200 | Location registered (1 row) select * from pgq_node.create_node('aset', 'root', 'rnode', 'londiste_root', null::text, null::int8, null::text); ret_code | ret_note ----------+------------------------------------------------------------ 200 | Node "rnode" initialized for queue "aset" with type "root" (1 row) select * from londiste.local_add_table('aset', 
'public.testdata_nopk'); ret_code | ret_note ----------+---------------------------------------------------- 400 | Primary key missing on table: public.testdata_nopk (1 row) select * from londiste.local_add_table('aset', 'public.testdata'); ret_code | ret_note ----------+------------------------------ 200 | Table added: public.testdata (1 row) select tgname from pg_trigger where tgrelid = 'public.testdata'::regclass order by 1; tgname ---------------- _londiste_aset (1 row) insert into testdata (txt) values ('test-data'); select * from londiste.get_table_list('aset'); table_name | local | merge_state | custom_snapshot | table_attrs | dropped_ddl | copy_role | copy_pos | dest_table -----------------+-------+-------------+-----------------+-------------+-------------+-----------+----------+------------ public.testdata | t | ok | | | | | 0 | (1 row) select * from londiste.local_show_missing('aset'); obj_kind | obj_name ----------+----------------------------- S | public.testdata_id_seq S | public.testdata_nopk_id_seq r | public.testdata_nopk (3 rows) select * from londiste.local_remove_table('aset', 'public.testdata'); ret_code | ret_note ----------+-------------------------------- 200 | Table removed: public.testdata (1 row) select * from londiste.local_remove_table('aset', 'public.testdata'); ret_code | ret_note ----------+---------------------------------- 400 | Table not found: public.testdata (1 row) select tgname from pg_trigger where tgrelid = 'public.testdata'::regclass; tgname -------- (0 rows) select * from londiste.get_table_list('aset'); table_name | local | merge_state | custom_snapshot | table_attrs | dropped_ddl | copy_role | copy_pos | dest_table ------------+-------+-------------+-----------------+-------------+-------------+-----------+----------+------------ (0 rows) select ev_id, ev_type, ev_data, ev_extra1 from pgq.event_template; ev_id | ev_type | ev_data | ev_extra1 -------+-----------------------+--------------------+----------------- 1 | 
londiste.add-table | public.testdata | 2 | I:id | id=1&txt=test-data | public.testdata 3 | londiste.remove-table | public.testdata | (3 rows) select * from londiste.local_show_missing('aset'); obj_kind | obj_name ----------+----------------------------- S | public.testdata_id_seq S | public.testdata_nopk_id_seq r | public.testdata r | public.testdata_nopk (4 rows) -- trigtest create table trg_test ( id int4 primary key, txt text ); select * from londiste.local_add_table('aset', 'public.trg_test', array['ev_extra4=''test='' || txt']); ret_code | ret_note ----------+------------------------------ 200 | Table added: public.trg_test (1 row) select * from londiste.local_add_table('aset', 'public.trg_test'); ret_code | ret_note ----------+-------------------------------------- 200 | Table already added: public.trg_test (1 row) select * from londiste.local_add_table('aset', 'public.trg_test', array['ev_extra4=''test='' || txt'], 'handler=foobar'); ret_code | ret_note ----------+---------------------------------------------------------------- 410 | Table public.trg_test already added, but with different args: (1 row) insert into trg_test values (1, 'data'); truncate trg_test; select ev_id, ev_type, ev_data, ev_extra1, ev_extra4 from pgq.event_template where ev_extra1 = 'public.trg_test'; ev_id | ev_type | ev_data | ev_extra1 | ev_extra4 -------+---------+---------------+-----------------+----------- 5 | I:id | id=1&txt=data | public.trg_test | test=data (1 row) select tgname from pg_trigger where tgrelid = 'public.trg_test'::regclass order by 1; tgname ---------------- _londiste_aset (1 row) delete from londiste.table_info where table_name = 'public.trg_test'; select tgname from pg_trigger where tgrelid = 'public.trg_test'::regclass order by 1; tgname -------- (0 rows) -- handler test create table hdlr_test ( id int4 primary key, txt text ); select * from londiste.local_add_table('aset', 'public.hdlr_test'); ret_code | ret_note ----------+------------------------------- 
200 | Table added: public.hdlr_test (1 row) insert into hdlr_test values (1, 'data'); select * from londiste.local_change_handler('aset', 'public.hdlr_test', array['ev_extra4=''test='' || txt'], 'handler=foobar'); ret_code | ret_note ----------+--------------------------------------------- 200 | Handler changed for table: public.hdlr_test (1 row) insert into hdlr_test values (2, 'data2'); select * from londiste.local_change_handler('aset', 'public.hdlr_test', '{}'::text[], ''); ret_code | ret_note ----------+--------------------------------------------- 200 | Handler changed for table: public.hdlr_test (1 row) insert into hdlr_test values (3, 'data3'); truncate hdlr_test; select ev_id, ev_type, ev_data, ev_extra1, ev_extra4 from pgq.event_template where ev_extra1 = 'public.hdlr_test'; ev_id | ev_type | ev_data | ev_extra1 | ev_extra4 -------+---------+----------------+------------------+------------ 7 | I:id | id=1&txt=data | public.hdlr_test | 8 | I:id | id=2&txt=data2 | public.hdlr_test | test=data2 9 | I:id | id=3&txt=data3 | public.hdlr_test | (3 rows) -- test proper trigger creation with add-table specific args select * from londiste.local_add_table('aset', 'public.trg_test', array['ev_extra4=''test='' || txt', 'expect_sync', 'skip']); ret_code | ret_note ----------+------------------------------ 200 | Table added: public.trg_test (1 row) insert into trg_test values (2, 'data2'); ERROR: SKIP does not work in AFTER trigger. 
skytools-3.2.6/sql/londiste/expected/init_ext_1.out0000644000000000000000000000071212426435645017327 0ustar \set ECHO off upgrade_schema ---------------- 2 (1 row) create extension londiste from 'unpackaged'; select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'londiste'; dumpable ---------- 4 (1 row) drop extension londiste; create extension londiste; select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'londiste'; dumpable ---------- 4 (1 row) skytools-3.2.6/sql/londiste/docs/0000755000000000000000000000000012426435645013662 5ustar skytools-3.2.6/sql/londiste/docs/Topics.txt0000644000000000000000000000650512426435645015672 0ustar Format: 1.52 # This is the Natural Docs topics file for this project. If you change anything # here, it will apply to THIS PROJECT ONLY. If you'd like to change something # for all your projects, edit the Topics.txt in Natural Docs' Config directory # instead. # If you'd like to prevent keywords from being recognized by Natural Docs, you # can do it like this: # Ignore Keywords: [keyword], [keyword], ... # # Or you can use the list syntax like how they are defined: # Ignore Keywords: # [keyword] # [keyword], [plural keyword] # ... #------------------------------------------------------------------------------- # SYNTAX: # # Topic Type: [name] # Alter Topic Type: [name] # Creates a new topic type or alters one from the main file. Each type gets # its own index and behavior settings. Its name can have letters, numbers, # spaces, and these charaters: - / . ' # # Plural: [name] # Sets the plural name of the topic type, if different. # # Keywords: # [keyword] # [keyword], [plural keyword] # ... # Defines or adds to the list of keywords for the topic type. They may only # contain letters, numbers, and spaces and are not case sensitive. Plural # keywords are used for list topics. You can redefine keywords found in the # main topics file. 
# # Index: [yes|no] # Whether the topics get their own index. Defaults to yes. Everything is # included in the general index regardless of this setting. # # Scope: [normal|start|end|always global] # How the topics affects scope. Defaults to normal. # normal - Topics stay within the current scope. # start - Topics start a new scope for all the topics beneath it, # like class topics. # end - Topics reset the scope back to global for all the topics # beneath it. # always global - Topics are defined as global, but do not change the scope # for any other topics. # # Class Hierarchy: [yes|no] # Whether the topics are part of the class hierarchy. Defaults to no. # # Page Title If First: [yes|no] # Whether the topic's title becomes the page title if it's the first one in # a file. Defaults to no. # # Break Lists: [yes|no] # Whether list topics should be broken into individual topics in the output. # Defaults to no. # # Can Group With: [type], [type], ... # Defines a list of topic types that this one can possibly be grouped with. # Defaults to none. #------------------------------------------------------------------------------- # The following topics are defined in the main file, if you'd like to alter # their behavior or add keywords: # # Generic, Class, Interface, Section, File, Group, Function, Variable, # Property, Type, Constant, Enumeration, Event, Delegate, Macro, # Database, Database Table, Database View, Database Index, Database # Cursor, Database Trigger, Cookie, Build Target # If you add something that you think would be useful to other developers # and should be included in Natural Docs by default, please e-mail it to # topics [at] naturaldocs [dot] org. 
Topic Type: Schema Plural: Schemas Index: No Scope: Start Class Hierarchy: Yes Keywords: schema, schemas Alter Topic Type: Function Add Keywords: public function internal function Alter Topic Type: File Index: No skytools-3.2.6/sql/londiste/docs/Languages.txt0000644000000000000000000001202112426435645016325 0ustar Format: 1.52 # This is the Natural Docs languages file for this project. If you change # anything here, it will apply to THIS PROJECT ONLY. If you'd like to change # something for all your projects, edit the Languages.txt in Natural Docs' # Config directory instead. Ignore Extension: sql #------------------------------------------------------------------------------- # SYNTAX: # # Unlike other Natural Docs configuration files, in this file all comments # MUST be alone on a line. Some languages deal with the # character, so you # cannot put comments on the same line as content. # # Also, all lists are separated with spaces, not commas, again because some # languages may need to use them. # # Language: [name] # Alter Language: [name] # Defines a new language or alters an existing one. Its name can use any # characters. If any of the properties below have an add/replace form, you # must use that when using Alter Language. # # The language Shebang Script is special. It's entry is only used for # extensions, and files with those extensions have their shebang (#!) lines # read to determine the real language of the file. Extensionless files are # always treated this way. # # The language Text File is also special. It's treated as one big comment # so you can put Natural Docs content in them without special symbols. Also, # if you don't specify a package separator, ignored prefixes, or enum value # behavior, it will copy those settings from the language that is used most # in the source tree. # # Extensions: [extension] [extension] ... # [Add/Replace] Extensions: [extension] [extension] ... # Defines the file extensions of the language's source files. 
You can # redefine extensions found in the main languages file. You can use * to # mean any undefined extension. # # Shebang Strings: [string] [string] ... # [Add/Replace] Shebang Strings: [string] [string] ... # Defines a list of strings that can appear in the shebang (#!) line to # designate that it's part of the language. You can redefine strings found # in the main languages file. # # Ignore Prefixes in Index: [prefix] [prefix] ... # [Add/Replace] Ignored Prefixes in Index: [prefix] [prefix] ... # # Ignore [Topic Type] Prefixes in Index: [prefix] [prefix] ... # [Add/Replace] Ignored [Topic Type] Prefixes in Index: [prefix] [prefix] ... # Specifies prefixes that should be ignored when sorting symbols in an # index. Can be specified in general or for a specific topic type. # #------------------------------------------------------------------------------ # For basic language support only: # # Line Comments: [symbol] [symbol] ... # Defines a space-separated list of symbols that are used for line comments, # if any. # # Block Comments: [opening sym] [closing sym] [opening sym] [closing sym] ... # Defines a space-separated list of symbol pairs that are used for block # comments, if any. # # Package Separator: [symbol] # Defines the default package separator symbol. The default is a dot. # # [Topic Type] Prototype Enders: [symbol] [symbol] ... # When defined, Natural Docs will attempt to get a prototype from the code # immediately following the topic type. It stops when it reaches one of # these symbols. Use \n for line breaks. # # Line Extender: [symbol] # Defines the symbol that allows a prototype to span multiple lines if # normally a line break would end it. # # Enum Values: [global|under type|under parent] # Defines how enum values are referenced. The default is global. # global - Values are always global, referenced as 'value'. # under type - Values are under the enum type, referenced as # 'package.enum.value'. 
# under parent - Values are under the enum's parent, referenced as # 'package.value'. # # Perl Package: [perl package] # Specifies the Perl package used to fine-tune the language behavior in ways # too complex to do in this file. # #------------------------------------------------------------------------------ # For full language support only: # # Full Language Support: [perl package] # Specifies the Perl package that has the parsing routines necessary for full # language support. # #------------------------------------------------------------------------------- # The following languages are defined in the main file, if you'd like to alter # them: # # Text File, Shebang Script, C/C++, C#, Java, JavaScript, Perl, Python, # PHP, SQL, Visual Basic, Pascal, Assembly, Ada, Tcl, Ruby, Makefile, # ActionScript, ColdFusion, R, Fortran # If you add a language that you think would be useful to other developers # and should be included in Natural Docs by default, please e-mail it to # languages [at] naturaldocs [dot] org. Language: PLPGSQL Extension: sql Line Comment: -- Block Comment: /* */ Enum Values: Global Function Prototype Enders: , ; ) $ ' Variable Prototype Enders: , ; ) := default Default DEFAULT Database Index Prototype Enders: , ; ) Database Trigger Prototype Enders: begin Begin BEGIN skytools-3.2.6/sql/londiste/docs/Menu.txt0000644000000000000000000000373112426435645015333 0ustar Format: 1.52 # You can add a title and sub-title to your menu like this: # Title: [project name] # SubTitle: [subtitle] # You can add a footer to your documentation like this: # Footer: [text] # If you want to add a copyright notice, this would be the place to do it. # You can add a timestamp to your documentation like one of these: # Timestamp: Generated on month day, year # Timestamp: Updated mm/dd/yyyy # Timestamp: Last updated mon day # # m - One or two digit month. January is "1" # mm - Always two digit month. January is "01" # mon - Short month word. 
January is "Jan" # month - Long month word. January is "January" # d - One or two digit day. 1 is "1" # dd - Always two digit day. 1 is "01" # day - Day with letter extension. 1 is "1st" # yy - Two digit year. 2006 is "06" # yyyy - Four digit year. 2006 is "2006" # year - Four digit year. 2006 is "2006" # -------------------------------------------------------------------------- # # Cut and paste the lines below to change the order in which your files # appear on the menu. Don't worry about adding or removing files, Natural # Docs will take care of that. # # You can further organize the menu by grouping the entries. Add a # "Group: [name] {" line to start a group, and add a "}" to end it. # # You can add text and web links to the menu by adding "Text: [text]" and # "Link: [name] ([URL])" lines, respectively. # # The formatting and comments are auto-generated, so don't worry about # neatness when editing the file. Natural Docs will clean it up the next # time it is run. When working with groups, just deal with the braces and # forget about the indentation and comments. 
# # -------------------------------------------------------------------------- File: Londiste functions (functions.sql) File: Londiste internals (schema.sql) Group: Index { Index: Everything Database Table Index: Database Tables Function Index: Functions } # Group: Index skytools-3.2.6/sql/londiste/structure/0000755000000000000000000000000012426435645014772 5ustar skytools-3.2.6/sql/londiste/structure/triggers.sql0000644000000000000000000000021412426435645017336 0ustar create trigger table_info_trigger_sync before delete on londiste.table_info for each row execute procedure londiste.table_info_trigger(); skytools-3.2.6/sql/londiste/structure/ext_postproc.sql0000644000000000000000000000050612426435645020245 0ustar -- tag data objects as dumpable SELECT pg_catalog.pg_extension_config_dump('londiste.table_info', ''); SELECT pg_catalog.pg_extension_config_dump('londiste.seq_info', ''); SELECT pg_catalog.pg_extension_config_dump('londiste.applied_execute', ''); SELECT pg_catalog.pg_extension_config_dump('londiste.pending_fkeys', ''); skytools-3.2.6/sql/londiste/structure/tables.sql0000644000000000000000000001375712426435645017002 0ustar -- ---------------------------------------------------------------------- -- Section: Londiste internals -- -- Londiste storage: tables/seqs/fkeys/triggers/events. 
-- -- Londiste event types: -- I/U/D - partial SQL event from pgq.sqltriga() -- I:/U:/D: - urlencoded event from pgq.logutriga() -- EXECUTE - SQL script execution -- TRUNCATE - table truncation -- londiste.add-table - global table addition -- londiste.remove-table - global table removal -- londiste.update-seq - sequence update -- londiste.remove-seq - global sequence removal -- -- pgq.sqltriga() event: -- ev_type - I/U/D which means insert/update/delete -- ev_data - partial SQL -- ev_extra1 - table name -- -- Insert: ev_type = "I", ev_data = "(col1, col2) values (2, 'foo')", ev_extra1 = "public.tblname" -- -- Update: ev_type = "U", ev_data = "col2 = null where col1 = 2", ev_extra1 = "public.tblname" -- -- Delete: ev_type = "D", ev_data = "col1 = 2", ev_extra1 = "public.tblname" -- -- pgq.logutriga() event: -- ev_type - I:/U:/D: plus comma separated list of pkey columns -- ev_data - urlencoded row columns -- ev_extra1 - table name -- -- Insert: ev_type = "I:col1", ev_data = "" -- -- Truncate trigger event: -- ev_type - TRUNCATE -- ev_extra1 - table name -- -- Execute SQL event: -- ev_type - EXECUTE -- ev_data - SQL script -- ev_extra1 - Script ID -- -- Global table addition: -- ev_type - londiste.add-table -- ev_data - table name -- -- Global table removal: -- ev_type - londiste.remove-table -- ev_data - table name -- -- Global sequence update: -- ev_type - londiste.update-seq -- ev_data - seq value -- ev_extra1 - seq name --5) -- Global sequence removal: -- ev_type - londiste.remove-seq -- ev_data - seq name -- ---------------------------------------------------------------------- create schema londiste; set default_with_oids = 'off'; -- ---------------------------------------------------------------------- -- Table: londiste.table_info -- -- Info about registered tables. 
-- -- Columns: -- nr - number for visual ordering -- queue_name - Cascaded queue name -- table_name - fully-qualified table name -- local - Is used locally -- merge_state - State for tables -- custom_snapshot - remote snapshot for COPY command -- dropped_ddl - temp place to store ddl -- table_attrs - urlencoded dict of extra attributes -- -- Tables merge states: -- NULL - copy has not yet happened -- in-copy - ongoing bulk copy -- catching-up - copy process applies events that happened during copy -- wanna-sync:% - copy process caught up, wants to hand table over to replay -- do-sync:% - replay process is ready to accept the table -- ok - in sync, replay applies events -- ---------------------------------------------------------------------- create table londiste.table_info ( nr serial not null, queue_name text not null, table_name text not null, local boolean not null default false, merge_state text, custom_snapshot text, dropped_ddl text, table_attrs text, dest_table text, primary key (queue_name, table_name), foreign key (queue_name) references pgq_node.node_info (queue_name) on delete cascade, check (dropped_ddl is null or merge_state in ('in-copy', 'catching-up')) ); -- ---------------------------------------------------------------------- -- Table: londiste.seq_info -- -- Sequences available on this queue. 
-- -- Columns: -- nr - number for visual ordering -- queue_name - cascaded queue name -- seq_name - fully-qualified seq name -- local - there is actual seq on local node -- last_value - last published value from root -- ---------------------------------------------------------------------- create table londiste.seq_info ( nr serial not null, queue_name text not null, seq_name text not null, local boolean not null default false, last_value int8 not null, primary key (queue_name, seq_name), foreign key (queue_name) references pgq_node.node_info (queue_name) on delete cascade ); -- ---------------------------------------------------------------------- -- Table: londiste.applied_execute -- -- Info about EXECUTE commands that are ran. -- -- Columns: -- queue_name - cascaded queue name -- execute_file - filename / unique id -- execute_time - the time execute happened -- execute_sql - contains SQL for EXECUTE event (informative) -- ---------------------------------------------------------------------- create table londiste.applied_execute ( queue_name text not null, execute_file text not null, execute_time timestamptz not null default now(), execute_sql text not null, execute_attrs text, primary key (execute_file) ); -- ---------------------------------------------------------------------- -- Table: londiste.pending_fkeys -- -- Details on dropped fkeys. Global, not specific to any set. 
-- -- Columns: -- from_table - fully-qualified table name -- to_table - fully-qualified table name -- fkey_name - name of constraint -- fkey_def - full fkey definition -- ---------------------------------------------------------------------- create table londiste.pending_fkeys ( from_table text not null, to_table text not null, fkey_name text not null, fkey_def text not null, primary key (from_table, fkey_name) ); skytools-3.2.6/sql/londiste/structure/ext_unpackaged.sql0000644000000000000000000000042612426435645020477 0ustar ALTER EXTENSION londiste ADD SCHEMA londiste; ALTER EXTENSION londiste ADD TABLE londiste.table_info; ALTER EXTENSION londiste ADD TABLE londiste.seq_info; ALTER EXTENSION londiste ADD TABLE londiste.applied_execute; ALTER EXTENSION londiste ADD TABLE londiste.pending_fkeys; skytools-3.2.6/sql/londiste/structure/upgrade.sql0000644000000000000000000000003312426435645017136 0ustar \i structure/functions.sql skytools-3.2.6/sql/londiste/structure/grants.ini0000644000000000000000000000572212426435645016777 0ustar [GrantFu] # roles that we maintain in this file roles = londiste_writer, londiste_reader, public, pgq_admin [1.tables] on.tables = londiste.table_info, londiste.seq_info, londiste.pending_fkeys, londiste.applied_execute pgq_admin = select, insert, update, delete londiste_reader = select # backwards compat, should be dropped? 
public = select [2.public.fns] on.functions = %(londiste_public_fns)s public = execute [3.remote.node] on.functions = %(londiste_remote_fns)s londiste_reader = execute londiste_writer = execute [4.local.node] on.functions = %(londiste_local_fns)s, %(londiste_internal_fns)s londiste_writer = execute [5.seqs] londiste_writer = usage on.sequences = londiste.table_info_nr_seq, londiste.seq_info_nr_seq [6.maint] pgq_admin = execute on.functions = londiste.periodic_maintenance() # define various groups of functions [DEFAULT] # can be executed by everybody, read-only, not secdef londiste_public_fns = londiste.find_column_types(text), londiste.find_table_fkeys(text), londiste.find_rel_oid(text, text), londiste.find_table_oid(text), londiste.find_seq_oid(text), londiste.is_replica_func(oid), londiste.quote_fqname(text), londiste.make_fqname(text), londiste.split_fqname(text), londiste.version() # remote node uses those on provider, read local tables londiste_remote_fns = londiste.get_seq_list(text), londiste.get_table_list(text), londiste._coordinate_copy(text, text) # used by owner only londiste_internal_fns = londiste.periodic_maintenance(), londiste.upgrade_schema() # used by local worker, admin londiste_local_fns = londiste.local_show_missing(text), londiste.local_add_seq(text, text), londiste.local_add_table(text, text, text[], text, text), londiste.local_add_table(text, text, text[], text), londiste.local_add_table(text, text, text[]), londiste.local_add_table(text, text), londiste.local_remove_seq(text, text), londiste.local_remove_table(text, text), londiste.global_add_table(text, text), londiste.global_remove_table(text, text), londiste.global_update_seq(text, text, int8), londiste.global_remove_seq(text, text), londiste.get_table_pending_fkeys(text), londiste.get_valid_pending_fkeys(text), londiste.drop_table_fkey(text, text), londiste.restore_table_fkey(text, text), londiste.execute_start(text, text, text, boolean), londiste.execute_finish(text, text), 
londiste.root_check_seqs(text, int8), londiste.root_check_seqs(text), londiste.root_notify_change(text, text, text), londiste.local_set_table_state(text, text, text, text), londiste.local_set_table_attrs(text, text, text), londiste.local_set_table_struct(text, text, text), londiste.drop_table_triggers(text, text), londiste.table_info_trigger(), londiste.create_partition(text, text, text, text, timestamptz, text), londiste.is_obsolete_partition (text, interval, text), londiste.list_obsolete_partitions (text, interval, text), londiste.drop_obsolete_partitions (text, interval, text), londiste.create_trigger(text,text,text[],text,text) skytools-3.2.6/sql/londiste/structure/functions.sql0000644000000000000000000000365612426435645017535 0ustar -- Section: Londiste functions -- upgrade schema \i functions/londiste.upgrade_schema.sql select londiste.upgrade_schema(); -- Group: Information \i functions/londiste.get_seq_list.sql \i functions/londiste.get_table_list.sql \i functions/londiste.local_show_missing.sql -- Group: Local object registration (setup tool) \i functions/londiste.local_add_seq.sql \i functions/londiste.create_trigger.sql \i functions/londiste.local_add_table.sql \i functions/londiste.local_change_handler.sql \i functions/londiste.local_remove_seq.sql \i functions/londiste.local_remove_table.sql -- Group: Global object registrations (internal) \i functions/londiste.global_add_table.sql \i functions/londiste.global_remove_table.sql \i functions/londiste.global_update_seq.sql \i functions/londiste.global_remove_seq.sql -- Group: FKey handling \i functions/londiste.handle_fkeys.sql -- Group: Execute handling \i functions/londiste.execute_start.sql \i functions/londiste.execute_finish.sql -- Group: Internal functions \i functions/londiste.root_check_seqs.sql \i functions/londiste.root_notify_change.sql \i functions/londiste.local_set_table_state.sql \i functions/londiste.local_set_table_attrs.sql \i functions/londiste.local_set_table_struct.sql \i 
functions/londiste.periodic_maintenance.sql -- Group: Utility functions \i functions/londiste.find_column_types.sql \i functions/londiste.find_table_fkeys.sql \i functions/londiste.find_table_oid.sql \i functions/londiste.quote_fqname.sql \i functions/londiste.make_fqname.sql \i functions/londiste.split_fqname.sql \i functions/londiste.table_info_trigger.sql \i functions/londiste.drop_table_triggers.sql \i functions/londiste.is_replica_func.sql \i functions/londiste.version.sql -- Group: Utility functions for handlers \i functions/londiste.create_partition.sql \i functions/londiste.is_obsolete_partition.sql \i functions/londiste.list_obsolete_partitions.sql \i functions/londiste.drop_obsolete_partitions.sql skytools-3.2.6/sql/londiste/structure/grants.sql0000644000000000000000000000035612426435645017015 0ustar grant usage on schema londiste to public; grant select on londiste.table_info to public; grant select on londiste.seq_info to public; grant select on londiste.pending_fkeys to public; grant select on londiste.applied_execute to public; skytools-3.2.6/sql/londiste/structure/install.sql0000644000000000000000000000014512426435645017161 0ustar \i structure/tables.sql \i structure/functions.sql \i structure/triggers.sql \i structure/grants.sql skytools-3.2.6/sql/conflicthandler/0000755000000000000000000000000012426435645014250 5ustar skytools-3.2.6/sql/conflicthandler/merge_on_time.sql0000644000000000000000000000156512426435645017611 0ustar create or replace function merge_on_time( fn_conf text, cur_tick text, ev_id text, ev_time text, ev_txid text, ev_retry text, ev_type text, ev_data text, ev_extra1 text, ev_extra2 text, ev_extra3 text, ev_extra4 text) returns text as $$ # callback function for londiste applyfn handler try: import pkgloader pkgloader.require('skytools', '3.0') from skytools.plpy_applyrow import ts_conflict_handler args = [fn_conf, ev_type, ev_data, ev_extra1, ev_extra2, ev_extra3, ev_extra4] return ts_conflict_handler(SD, args) except: import 
traceback for ln in traceback.format_exc().split('\n'): if ln: plpy.warning(ln) raise $$ language plpythonu; -- select merge_on_time('timefield=modified_date', 'I:id_ccard', 'key_user=foo&id_ccard=1&modified_date=2005-01-01', 'ccdb.ccard', '', '', '');skytools-3.2.6/sql/conflicthandler/README0000644000000000000000000000030212426435645015123 0ustar Merge function to be used with londiste 'applyfn' handler. londiste3 add-table foo --handler=applyfn --handler-arg="func_name=merge_on_time" --handler-arg="func_conf=timefield=modified_date" skytools-3.2.6/sql/conflicthandler/Makefile0000644000000000000000000000040212426435645015704 0ustar REGRESS = test_merge REGRESS_OPTS = --load-language=plpgsql --load-language=plpythonu PG_CONFIG = pg_config PGXS = $(shell $(PG_CONFIG) --pgxs) include $(PGXS) test: make installcheck || { less regression.diffs ; exit 1; } ack: cp results/* expected/ skytools-3.2.6/sql/conflicthandler/sql/0000755000000000000000000000000012426435645015047 5ustar skytools-3.2.6/sql/conflicthandler/sql/test_merge.sql0000644000000000000000000000144512426435645017732 0ustar \set ECHO none \i merge_on_time.sql \set ECHO all set DateStyle='ISO'; create table mergetest ( intcol int4, txtcol text, timecol timestamp ); -- insert to empty select merge_on_time('timefield=timecol', null, null, null, null, null, 'I:intcol', 'intcol=5&txtcol=v1&timecol=2010-09-09+12:12', 'mergetest', null, null, null); select * from mergetest; -- insert to with time earlier select merge_on_time('timefield=timecol', null, null, null, null, null, 'I:intcol', 'intcol=5&txtcol=v2&timecol=2010-09-08+12:12', 'mergetest', null, null, null); select * from mergetest; -- insert to with time later select merge_on_time('timefield=timecol', null, null, null, null, null, 'I:intcol', 'intcol=5&txtcol=v3&timecol=2010-09-10+12:12', 'mergetest', null, null, null); select * from mergetest; skytools-3.2.6/sql/conflicthandler/expected/0000755000000000000000000000000012426435645016051 5ustar 
skytools-3.2.6/sql/conflicthandler/expected/test_merge.out0000644000000000000000000000273212426435645020744 0ustar \set ECHO none set DateStyle='ISO'; create table mergetest ( intcol int4, txtcol text, timecol timestamp ); -- insert to empty select merge_on_time('timefield=timecol', null, null, null, null, null, 'I:intcol', 'intcol=5&txtcol=v1&timecol=2010-09-09+12:12', 'mergetest', null, null, null); merge_on_time --------------------- data ok, no old row (1 row) select * from mergetest; intcol | txtcol | timecol --------+--------+--------------------- 5 | v1 | 2010-09-09 12:12:00 (1 row) -- insert to with time earlier select merge_on_time('timefield=timecol', null, null, null, null, null, 'I:intcol', 'intcol=5&txtcol=v2&timecol=2010-09-08+12:12', 'mergetest', null, null, null); merge_on_time --------------------------------------------------- IGN:data ok, old row, current row more up-to-date (1 row) select * from mergetest; intcol | txtcol | timecol --------+--------+--------------------- 5 | v1 | 2010-09-09 12:12:00 (1 row) -- insert to with time later select merge_on_time('timefield=timecol', null, null, null, null, null, 'I:intcol', 'intcol=5&txtcol=v3&timecol=2010-09-10+12:12', 'mergetest', null, null, null); merge_on_time ---------------------------------- data ok, old row, new row better (1 row) select * from mergetest; intcol | txtcol | timecol --------+--------+--------------------- 5 | v3 | 2010-09-10 12:12:00 (1 row) skytools-3.2.6/sql/pgq_coop/0000755000000000000000000000000012426435645012720 5ustar skytools-3.2.6/sql/pgq_coop/pgq_coop.control0000644000000000000000000000024112426435645016126 0ustar # pgq_coop comment = 'Cooperative queue consuming for PgQ' default_version = '3.1.1' relocatable = false superuser = true schema = 'pg_catalog' requires = 'pgq' skytools-3.2.6/sql/pgq_coop/Makefile0000644000000000000000000000055412426435645014364 0ustar EXTENSION = pgq_coop EXT_VERSION = 3.1.1 EXT_OLD_VERSIONS = 3.1 Contrib_regress = pgq_coop_init_noext 
pgq_coop_test Extension_regress = pgq_coop_init_ext pgq_coop_test include ../common-pgxs.mk # # docs # dox: cleandox $(SRCS) mkdir -p docs/html mkdir -p docs/sql $(CATSQL) --ndoc structure/functions.sql > docs/sql/functions.sql $(NDOC) $(NDOCARGS) skytools-3.2.6/sql/pgq_coop/functions/0000755000000000000000000000000012426435645014730 5ustar skytools-3.2.6/sql/pgq_coop/functions/pgq_coop.register_subconsumer.sql0000644000000000000000000000457112426435645023537 0ustar create or replace function pgq_coop.register_subconsumer( i_queue_name text, i_consumer_name text, i_subconsumer_name text) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq_coop.register_subconsumer(3) -- -- Subscribe a subconsumer on a queue. -- -- Subconsumer will be registered as another consumer on queue, -- whose name will be i_consumer_name and i_subconsumer_name -- combined. -- -- Returns: -- 0 - if already registered -- 1 - if this is a new registration -- -- Calls: -- pgq.register_consumer(i_queue_name, i_consumer_name) -- pgq.register_consumer(i_queue_name, _subcon_name); -- -- Tables directly manipulated: -- update - pgq.subscription -- -- ---------------------------------------------------------------------- declare _subcon_name text; -- consumer + subconsumer _queue_id integer; _consumer_id integer; _subcon_id integer; _consumer_sub_id integer; _subcon_result integer; r record; begin _subcon_name := i_consumer_name || '.' 
|| i_subconsumer_name; -- make sure main consumer exists perform pgq.register_consumer(i_queue_name, i_consumer_name); -- just go and register the subconsumer as a regular consumer _subcon_result := pgq.register_consumer(i_queue_name, _subcon_name); -- if it is a new registration if _subcon_result = 1 then select q.queue_id, mainc.co_id as main_consumer_id, s.sub_id as main_consumer_sub_id, subc.co_id as sub_consumer_id into r from pgq.queue q, pgq.subscription s, pgq.consumer mainc, pgq.consumer subc where mainc.co_name = i_consumer_name and subc.co_name = _subcon_name and q.queue_name = i_queue_name and s.sub_queue = q.queue_id and s.sub_consumer = mainc.co_id; if not found then raise exception 'main consumer not found'; end if; -- duplicate the sub_id of consumer to the subconsumer update pgq.subscription s set sub_id = r.main_consumer_sub_id, sub_last_tick = null, sub_next_tick = null where sub_queue = r.queue_id and sub_consumer = r.sub_consumer_id; end if; return _subcon_result; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq_coop/functions/pgq_coop.next_batch.sql0000644000000000000000000002224312426435645021401 0ustar create or replace function pgq_coop.next_batch( i_queue_name text, i_consumer_name text, i_subconsumer_name text) returns bigint as $$ -- ---------------------------------------------------------------------- -- Function: pgq_coop.next_batch(3) -- -- Makes next block of events active -- -- Result NULL means nothing to work with, for a moment -- -- Parameters: -- i_queue_name - Name of the queue -- i_consumer_name - Name of the consumer -- i_subconsumer_name - Name of the subconsumer -- -- Calls: -- pgq.register_consumer(i_queue_name, i_consumer_name) -- pgq.register_consumer(i_queue_name, _subcon_name); -- -- Tables directly manipulated: -- update - pgq.subscription -- -- ---------------------------------------------------------------------- begin return pgq_coop.next_batch_custom(i_queue_name, i_consumer_name, 
i_subconsumer_name, NULL, NULL, NULL, NULL); end; $$ language plpgsql; create or replace function pgq_coop.next_batch( i_queue_name text, i_consumer_name text, i_subconsumer_name text, i_dead_interval interval) returns bigint as $$ -- ---------------------------------------------------------------------- -- Function: pgq_coop.next_batch(4) -- -- Makes next block of events active -- -- If i_dead_interval is set, other subconsumers are checked for -- inactivity. If some subconsumer has active batch, but has -- been inactive more than i_dead_interval, the batch is taken over. -- -- Result NULL means nothing to work with, for a moment -- -- Parameters: -- i_queue_name - Name of the queue -- i_consumer_name - Name of the consumer -- i_subconsumer_name - Name of the subconsumer -- i_dead_interval - Take over other subconsumer batch if inactive -- ---------------------------------------------------------------------- begin return pgq_coop.next_batch_custom(i_queue_name, i_consumer_name, i_subconsumer_name, NULL, NULL, NULL, i_dead_interval); end; $$ language plpgsql; create or replace function pgq_coop.next_batch_custom( i_queue_name text, i_consumer_name text, i_subconsumer_name text, i_min_lag interval, i_min_count int4, i_min_interval interval) returns bigint as $$ -- ---------------------------------------------------------------------- -- Function: pgq_coop.next_batch_custom(6) -- -- Makes next block of events active. Block size can be tuned -- with i_min_count, i_min_interval parameters. Events age can -- be tuned with i_min_lag. 
-- -- Result NULL means nothing to work with, for a moment -- -- Parameters: -- i_queue_name - Name of the queue -- i_consumer_name - Name of the consumer -- i_subconsumer_name - Name of the subconsumer -- i_min_lag - Consumer wants events older than that -- i_min_count - Consumer wants batch to contain at least this many events -- i_min_interval - Consumer wants batch to cover at least this much time -- ---------------------------------------------------------------------- begin return pgq_coop.next_batch_custom(i_queue_name, i_consumer_name, i_subconsumer_name, i_min_lag, i_min_count, i_min_interval, NULL); end; $$ language plpgsql; create or replace function pgq_coop.next_batch_custom( i_queue_name text, i_consumer_name text, i_subconsumer_name text, i_min_lag interval, i_min_count int4, i_min_interval interval, i_dead_interval interval) returns bigint as $$ -- ---------------------------------------------------------------------- -- Function: pgq_coop.next_batch_custom(7) -- -- Makes next block of events active. Block size can be tuned -- with i_min_count, i_min_interval parameters. Events age can -- be tuned with i_min_lag. -- -- If i_dead_interval is set, other subconsumers are checked for -- inactivity. If some subconsumer has active batch, but has -- been inactive more than i_dead_interval, the batch is taken over. 
-- -- Result NULL means nothing to work with, for a moment -- -- Parameters: -- i_queue_name - Name of the queue -- i_consumer_name - Name of the consumer -- i_subconsumer_name - Name of the subconsumer -- i_min_lag - Consumer wants events older than that -- i_min_count - Consumer wants batch to contain at least this many events -- i_min_interval - Consumer wants batch to cover at least this much time -- i_dead_interval - Take over other subconsumer batch if inactive -- Calls: -- pgq.register_subconsumer(i_queue_name, i_consumer_name, i_subconsumer_name) -- pgq.next_batch_custom(i_queue_name, i_consumer_name, i_min_lag, i_min_count, i_min_interval) -- Tables directly manipulated: -- update - pgq.subscription -- ---------------------------------------------------------------------- declare _queue_id integer; _consumer_id integer; _subcon_id integer; _batch_id bigint; _prev_tick bigint; _cur_tick bigint; _sub_id integer; _dead record; begin -- fetch master consumer details, lock the row select q.queue_id, c.co_id, s.sub_next_tick into _queue_id, _consumer_id, _cur_tick from pgq.queue q, pgq.consumer c, pgq.subscription s where q.queue_name = i_queue_name and c.co_name = i_consumer_name and s.sub_queue = q.queue_id and s.sub_consumer = c.co_id for update of s; if not found then perform pgq_coop.register_subconsumer(i_queue_name, i_consumer_name, i_subconsumer_name); -- fetch the data again select q.queue_id, c.co_id, s.sub_next_tick into _queue_id, _consumer_id, _cur_tick from pgq.queue q, pgq.consumer c, pgq.subscription s where q.queue_name = i_queue_name and c.co_name = i_consumer_name and s.sub_queue = q.queue_id and s.sub_consumer = c.co_id; end if; if _cur_tick is not null then raise exception 'main consumer has batch open?'; end if; -- automatically register subconsumers perform 1 from pgq.subscription s, pgq.consumer c, pgq.queue q where q.queue_name = i_queue_name and s.sub_queue = q.queue_id and s.sub_consumer = c.co_id and c.co_name = i_consumer_name || '.' 
|| i_subconsumer_name; if not found then perform pgq_coop.register_subconsumer(i_queue_name, i_consumer_name, i_subconsumer_name); end if; -- fetch subconsumer details select s.sub_batch, sc.co_id, s.sub_id into _batch_id, _subcon_id, _sub_id from pgq.subscription s, pgq.consumer sc where sub_queue = _queue_id and sub_consumer = sc.co_id and sc.co_name = i_consumer_name || '.' || i_subconsumer_name; if not found then raise exception 'subconsumer not found'; end if; -- is there a batch already active if _batch_id is not null then update pgq.subscription set sub_active = now() where sub_queue = _queue_id and sub_consumer = _subcon_id; return _batch_id; end if; -- help dead comrade if i_dead_interval is not null then -- check if some other subconsumer has died select s.sub_batch, s.sub_consumer, s.sub_last_tick, s.sub_next_tick into _dead from pgq.subscription s where s.sub_queue = _queue_id and s.sub_id = _sub_id and s.sub_consumer <> _subcon_id and s.sub_consumer <> _consumer_id and sub_active < now() - i_dead_interval limit 1; if found then -- unregister old consumer delete from pgq.subscription where sub_queue = _queue_id and sub_consumer = _dead.sub_consumer; -- if dead consumer had batch, copy it over and return if _dead.sub_batch is not null then update pgq.subscription set sub_batch = _dead.sub_batch, sub_last_tick = _dead.sub_last_tick, sub_next_tick = _dead.sub_next_tick, sub_active = now() where sub_queue = _queue_id and sub_consumer = _subcon_id; return _dead.sub_batch; end if; end if; end if; -- get a new batch for the main consumer select batch_id, cur_tick_id, prev_tick_id into _batch_id, _cur_tick, _prev_tick from pgq.next_batch_custom(i_queue_name, i_consumer_name, i_min_lag, i_min_count, i_min_interval); if _batch_id is null then return null; end if; -- close batch for main consumer update pgq.subscription set sub_batch = null, sub_active = now(), sub_last_tick = sub_next_tick, sub_next_tick = null where sub_queue = _queue_id and sub_consumer = 
_consumer_id; -- copy state into subconsumer row update pgq.subscription set sub_batch = _batch_id, sub_last_tick = _prev_tick, sub_next_tick = _cur_tick, sub_active = now() where sub_queue = _queue_id and sub_consumer = _subcon_id; return _batch_id; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq_coop/functions/pgq_coop.unregister_subconsumer.sql0000644000000000000000000000323112426435645024072 0ustar create or replace function pgq_coop.unregister_subconsumer( i_queue_name text, i_consumer_name text, i_subconsumer_name text, i_batch_handling integer) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq_coop.unregister_subconsumer(4) -- -- Unregisters subconsumer from the queue. -- -- If consumer has active batch, then behviour depends on -- i_batch_handling parameter. -- -- Values for i_batch_handling: -- 0 - Fail with an exception. -- 1 - Close the batch, ignoring the events. -- -- Returns: -- 0 - no consumer found -- 1 - consumer found and unregistered -- -- Tables directly manipulated: -- delete - pgq.subscription -- -- ---------------------------------------------------------------------- declare _current_batch bigint; _queue_id integer; _subcon_id integer; begin select q.queue_id, c.co_id, sub_batch into _queue_id, _subcon_id, _current_batch from pgq.queue q, pgq.consumer c, pgq.subscription s where c.co_name = i_consumer_name || '.' 
|| i_subconsumer_name and q.queue_name = i_queue_name and s.sub_queue = q.queue_id and s.sub_consumer = c.co_id; if not found then return 0; end if; if _current_batch is not null then if i_batch_handling = 1 then -- ignore active batch else raise exception 'subconsumer has active batch'; end if; end if; delete from pgq.subscription where sub_queue = _queue_id and sub_consumer = _subcon_id; return 1; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq_coop/functions/pgq_coop.finish_batch.sql0000644000000000000000000000201312426435645021674 0ustar create or replace function pgq_coop.finish_batch( i_batch_id bigint) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq_coop.finish_batch(1) -- -- Closes a batch. -- -- Parameters: -- i_batch_id - id of the batch to be closed -- -- Returns: -- 1 if success (batch was found), 0 otherwise -- Calls: -- None -- Tables directly manipulated: -- update - pgq.subscription -- ---------------------------------------------------------------------- begin -- we are dealing with subconsumer, so nullify all tick info -- tick columns for master consumer contain adequate data update pgq.subscription set sub_active = now(), sub_last_tick = null, sub_next_tick = null, sub_batch = null where sub_batch = i_batch_id; if not found then raise warning 'coop_finish_batch: batch % not found', i_batch_id; return 0; else return 1; end if; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq_coop/functions/pgq_coop.version.sql0000644000000000000000000000066512426435645020753 0ustar create or replace function pgq_coop.version() returns text as $$ -- ---------------------------------------------------------------------- -- Function: pgq_coop.version(0) -- -- Returns version string for pgq_coop. ATM it is based on SkyTools version -- and only bumped when database code changes. 
-- ---------------------------------------------------------------------- begin return '3.1.1'; end; $$ language plpgsql; skytools-3.2.6/sql/pgq_coop/sql/0000755000000000000000000000000012426435645013517 5ustar skytools-3.2.6/sql/pgq_coop/sql/pgq_coop_test.sql0000644000000000000000000000416012426435645017107 0ustar select pgq.create_queue('testqueue'); update pgq.queue set queue_ticker_max_count = 1 where queue_name = 'testqueue'; -- register select pgq_coop.register_subconsumer('testqueue', 'maincons', 'subcons1'); select pgq_coop.register_subconsumer('testqueue', 'maincons', 'subcons1'); select pgq_coop.register_subconsumer('testqueue', 'maincons', 'subcons1'); select pgq_coop.register_subconsumer('testqueue', 'maincons', 'subcons2'); -- process events select pgq_coop.next_batch('testqueue', 'maincons', 'subcons1'); select pgq.insert_event('testqueue', 'ev0', 'data'); select pgq.insert_event('testqueue', 'ev1', 'data'); select pgq.insert_event('testqueue', 'ev2', 'data'); select pgq.ticker(); select pgq_coop.next_batch('testqueue', 'maincons', 'subcons1'); select pgq_coop.next_batch('testqueue', 'maincons', 'subcons1'); select pgq_coop.next_batch('testqueue', 'maincons', 'subcons2'); select pgq.insert_event('testqueue', 'ev3', 'data'); select pgq.insert_event('testqueue', 'ev4', 'data'); select pgq.insert_event('testqueue', 'ev5', 'data'); select pgq.ticker(); select pgq_coop.next_batch('testqueue', 'maincons', 'subcons2'); select pgq_coop.finish_batch(2); -- test takeover select pgq_coop.next_batch('testqueue', 'maincons', 'subcons2', '1 hour'); update pgq.subscription set sub_active = '2005-01-01' where sub_batch is not null; select pgq_coop.next_batch('testqueue', 'maincons', 'subcons2', '1 hour'); select pgq_coop.unregister_subconsumer('testqueue', 'maincons', 'subcons1', 0); select pgq_coop.unregister_subconsumer('testqueue', 'maincons', 'subcons2', 0); select pgq_coop.unregister_subconsumer('testqueue', 'maincons', 'subcons2', 1); select 
pgq_coop.unregister_subconsumer('testqueue', 'maincons', 'subcons2', 0); -- test auto-creation select pgq_coop.next_batch('testqueue', 'cmain', 'sc1'); select pgq_coop.next_batch('testqueue', 'cmain', 'sc2'); select consumer_name, last_tick from pgq.get_consumer_info(); -- test unregistering with pure pgq api select pgq.unregister_consumer('testqueue', 'cmain.sc2'); select pgq.unregister_consumer('testqueue', 'cmain'); select consumer_name, last_tick from pgq.get_consumer_info(); skytools-3.2.6/sql/pgq_coop/sql/pgq_coop_init_ext.sql0000644000000000000000000000026212426435645017752 0ustar create extension pgq; \set ECHO none \i structure/install.sql \set ECHO all create extension pgq_coop from 'unpackaged'; drop extension pgq_coop; create extension pgq_coop; skytools-3.2.6/sql/pgq_coop/sql/pgq_coop_init_noext.sql0000644000000000000000000000014412426435645020306 0ustar \set ECHO none \i ../pgq/pgq.sql \i structure/schema.sql \i structure/functions.sql \set ECHO all skytools-3.2.6/sql/pgq_coop/expected/0000755000000000000000000000000012426435645014521 5ustar skytools-3.2.6/sql/pgq_coop/expected/pgq_coop_test.out0000644000000000000000000000775112426435645020132 0ustar select pgq.create_queue('testqueue'); create_queue -------------- 1 (1 row) update pgq.queue set queue_ticker_max_count = 1 where queue_name = 'testqueue'; -- register select pgq_coop.register_subconsumer('testqueue', 'maincons', 'subcons1'); register_subconsumer ---------------------- 1 (1 row) select pgq_coop.register_subconsumer('testqueue', 'maincons', 'subcons1'); register_subconsumer ---------------------- 0 (1 row) select pgq_coop.register_subconsumer('testqueue', 'maincons', 'subcons1'); register_subconsumer ---------------------- 0 (1 row) select pgq_coop.register_subconsumer('testqueue', 'maincons', 'subcons2'); register_subconsumer ---------------------- 1 (1 row) -- process events select pgq_coop.next_batch('testqueue', 'maincons', 'subcons1'); next_batch ------------ (1 row) select 
pgq.insert_event('testqueue', 'ev0', 'data'); insert_event -------------- 1 (1 row) select pgq.insert_event('testqueue', 'ev1', 'data'); insert_event -------------- 2 (1 row) select pgq.insert_event('testqueue', 'ev2', 'data'); insert_event -------------- 3 (1 row) select pgq.ticker(); ticker -------- 1 (1 row) select pgq_coop.next_batch('testqueue', 'maincons', 'subcons1'); next_batch ------------ 1 (1 row) select pgq_coop.next_batch('testqueue', 'maincons', 'subcons1'); next_batch ------------ 1 (1 row) select pgq_coop.next_batch('testqueue', 'maincons', 'subcons2'); next_batch ------------ (1 row) select pgq.insert_event('testqueue', 'ev3', 'data'); insert_event -------------- 4 (1 row) select pgq.insert_event('testqueue', 'ev4', 'data'); insert_event -------------- 5 (1 row) select pgq.insert_event('testqueue', 'ev5', 'data'); insert_event -------------- 6 (1 row) select pgq.ticker(); ticker -------- 1 (1 row) select pgq_coop.next_batch('testqueue', 'maincons', 'subcons2'); next_batch ------------ 2 (1 row) select pgq_coop.finish_batch(2); finish_batch -------------- 1 (1 row) -- test takeover select pgq_coop.next_batch('testqueue', 'maincons', 'subcons2', '1 hour'); next_batch ------------ (1 row) update pgq.subscription set sub_active = '2005-01-01' where sub_batch is not null; select pgq_coop.next_batch('testqueue', 'maincons', 'subcons2', '1 hour'); next_batch ------------ 1 (1 row) select pgq_coop.unregister_subconsumer('testqueue', 'maincons', 'subcons1', 0); unregister_subconsumer ------------------------ 0 (1 row) select pgq_coop.unregister_subconsumer('testqueue', 'maincons', 'subcons2', 0); ERROR: subconsumer has active batch select pgq_coop.unregister_subconsumer('testqueue', 'maincons', 'subcons2', 1); unregister_subconsumer ------------------------ 1 (1 row) select pgq_coop.unregister_subconsumer('testqueue', 'maincons', 'subcons2', 0); unregister_subconsumer ------------------------ 0 (1 row) -- test auto-creation select 
pgq_coop.next_batch('testqueue', 'cmain', 'sc1'); next_batch ------------ (1 row) select pgq_coop.next_batch('testqueue', 'cmain', 'sc2'); next_batch ------------ (1 row) select consumer_name, last_tick from pgq.get_consumer_info(); consumer_name | last_tick ---------------+----------- cmain | 3 cmain.sc1 | cmain.sc2 | maincons | 3 (4 rows) -- test unregistering with pure pgq api select pgq.unregister_consumer('testqueue', 'cmain.sc2'); unregister_consumer --------------------- 1 (1 row) select pgq.unregister_consumer('testqueue', 'cmain'); unregister_consumer --------------------- 2 (1 row) select consumer_name, last_tick from pgq.get_consumer_info(); consumer_name | last_tick ---------------+----------- maincons | 3 (1 row) skytools-3.2.6/sql/pgq_coop/expected/pgq_coop_init_noext.out0000644000000000000000000000011212426435645021313 0ustar \set ECHO none upgrade_schema ---------------- 0 (1 row) skytools-3.2.6/sql/pgq_coop/expected/pgq_coop_init_ext.out0000644000000000000000000000020612426435645020762 0ustar create extension pgq; \set ECHO none create extension pgq_coop from 'unpackaged'; drop extension pgq_coop; create extension pgq_coop; skytools-3.2.6/sql/pgq_coop/docs/0000755000000000000000000000000012426435645013650 5ustar skytools-3.2.6/sql/pgq_coop/docs/Topics.txt0000644000000000000000000000650512426435645015660 0ustar Format: 1.52 # This is the Natural Docs topics file for this project. If you change anything # here, it will apply to THIS PROJECT ONLY. If you'd like to change something # for all your projects, edit the Topics.txt in Natural Docs' Config directory # instead. # If you'd like to prevent keywords from being recognized by Natural Docs, you # can do it like this: # Ignore Keywords: [keyword], [keyword], ... # # Or you can use the list syntax like how they are defined: # Ignore Keywords: # [keyword] # [keyword], [plural keyword] # ... 
#------------------------------------------------------------------------------- # SYNTAX: # # Topic Type: [name] # Alter Topic Type: [name] # Creates a new topic type or alters one from the main file. Each type gets # its own index and behavior settings. Its name can have letters, numbers, # spaces, and these charaters: - / . ' # # Plural: [name] # Sets the plural name of the topic type, if different. # # Keywords: # [keyword] # [keyword], [plural keyword] # ... # Defines or adds to the list of keywords for the topic type. They may only # contain letters, numbers, and spaces and are not case sensitive. Plural # keywords are used for list topics. You can redefine keywords found in the # main topics file. # # Index: [yes|no] # Whether the topics get their own index. Defaults to yes. Everything is # included in the general index regardless of this setting. # # Scope: [normal|start|end|always global] # How the topics affects scope. Defaults to normal. # normal - Topics stay within the current scope. # start - Topics start a new scope for all the topics beneath it, # like class topics. # end - Topics reset the scope back to global for all the topics # beneath it. # always global - Topics are defined as global, but do not change the scope # for any other topics. # # Class Hierarchy: [yes|no] # Whether the topics are part of the class hierarchy. Defaults to no. # # Page Title If First: [yes|no] # Whether the topic's title becomes the page title if it's the first one in # a file. Defaults to no. # # Break Lists: [yes|no] # Whether list topics should be broken into individual topics in the output. # Defaults to no. # # Can Group With: [type], [type], ... # Defines a list of topic types that this one can possibly be grouped with. # Defaults to none. 
#------------------------------------------------------------------------------- # The following topics are defined in the main file, if you'd like to alter # their behavior or add keywords: # # Generic, Class, Interface, Section, File, Group, Function, Variable, # Property, Type, Constant, Enumeration, Event, Delegate, Macro, # Database, Database Table, Database View, Database Index, Database # Cursor, Database Trigger, Cookie, Build Target # If you add something that you think would be useful to other developers # and should be included in Natural Docs by default, please e-mail it to # topics [at] naturaldocs [dot] org. Topic Type: Schema Plural: Schemas Index: No Scope: Start Class Hierarchy: Yes Keywords: schema, schemas Alter Topic Type: Function Add Keywords: public function internal function Alter Topic Type: File Index: No skytools-3.2.6/sql/pgq_coop/docs/Languages.txt0000644000000000000000000001202112426435645016313 0ustar Format: 1.52 # This is the Natural Docs languages file for this project. If you change # anything here, it will apply to THIS PROJECT ONLY. If you'd like to change # something for all your projects, edit the Languages.txt in Natural Docs' # Config directory instead. Ignore Extension: sql #------------------------------------------------------------------------------- # SYNTAX: # # Unlike other Natural Docs configuration files, in this file all comments # MUST be alone on a line. Some languages deal with the # character, so you # cannot put comments on the same line as content. # # Also, all lists are separated with spaces, not commas, again because some # languages may need to use them. # # Language: [name] # Alter Language: [name] # Defines a new language or alters an existing one. Its name can use any # characters. If any of the properties below have an add/replace form, you # must use that when using Alter Language. # # The language Shebang Script is special. 
It's entry is only used for # extensions, and files with those extensions have their shebang (#!) lines # read to determine the real language of the file. Extensionless files are # always treated this way. # # The language Text File is also special. It's treated as one big comment # so you can put Natural Docs content in them without special symbols. Also, # if you don't specify a package separator, ignored prefixes, or enum value # behavior, it will copy those settings from the language that is used most # in the source tree. # # Extensions: [extension] [extension] ... # [Add/Replace] Extensions: [extension] [extension] ... # Defines the file extensions of the language's source files. You can # redefine extensions found in the main languages file. You can use * to # mean any undefined extension. # # Shebang Strings: [string] [string] ... # [Add/Replace] Shebang Strings: [string] [string] ... # Defines a list of strings that can appear in the shebang (#!) line to # designate that it's part of the language. You can redefine strings found # in the main languages file. # # Ignore Prefixes in Index: [prefix] [prefix] ... # [Add/Replace] Ignored Prefixes in Index: [prefix] [prefix] ... # # Ignore [Topic Type] Prefixes in Index: [prefix] [prefix] ... # [Add/Replace] Ignored [Topic Type] Prefixes in Index: [prefix] [prefix] ... # Specifies prefixes that should be ignored when sorting symbols in an # index. Can be specified in general or for a specific topic type. # #------------------------------------------------------------------------------ # For basic language support only: # # Line Comments: [symbol] [symbol] ... # Defines a space-separated list of symbols that are used for line comments, # if any. # # Block Comments: [opening sym] [closing sym] [opening sym] [closing sym] ... # Defines a space-separated list of symbol pairs that are used for block # comments, if any. # # Package Separator: [symbol] # Defines the default package separator symbol. 
The default is a dot. # # [Topic Type] Prototype Enders: [symbol] [symbol] ... # When defined, Natural Docs will attempt to get a prototype from the code # immediately following the topic type. It stops when it reaches one of # these symbols. Use \n for line breaks. # # Line Extender: [symbol] # Defines the symbol that allows a prototype to span multiple lines if # normally a line break would end it. # # Enum Values: [global|under type|under parent] # Defines how enum values are referenced. The default is global. # global - Values are always global, referenced as 'value'. # under type - Values are under the enum type, referenced as # 'package.enum.value'. # under parent - Values are under the enum's parent, referenced as # 'package.value'. # # Perl Package: [perl package] # Specifies the Perl package used to fine-tune the language behavior in ways # too complex to do in this file. # #------------------------------------------------------------------------------ # For full language support only: # # Full Language Support: [perl package] # Specifies the Perl package that has the parsing routines necessary for full # language support. # #------------------------------------------------------------------------------- # The following languages are defined in the main file, if you'd like to alter # them: # # Text File, Shebang Script, C/C++, C#, Java, JavaScript, Perl, Python, # PHP, SQL, Visual Basic, Pascal, Assembly, Ada, Tcl, Ruby, Makefile, # ActionScript, ColdFusion, R, Fortran # If you add a language that you think would be useful to other developers # and should be included in Natural Docs by default, please e-mail it to # languages [at] naturaldocs [dot] org. 
Language: PLPGSQL Extension: sql Line Comment: -- Block Comment: /* */ Enum Values: Global Function Prototype Enders: , ; ) $ ' Variable Prototype Enders: , ; ) := default Default DEFAULT Database Index Prototype Enders: , ; ) Database Trigger Prototype Enders: begin Begin BEGIN skytools-3.2.6/sql/pgq_coop/docs/Menu.txt0000644000000000000000000000360012426435645015314 0ustar Format: 1.52 # You can add a title and sub-title to your menu like this: # Title: [project name] # SubTitle: [subtitle] # You can add a footer to your documentation like this: # Footer: [text] # If you want to add a copyright notice, this would be the place to do it. # You can add a timestamp to your documentation like one of these: # Timestamp: Generated on month day, year # Timestamp: Updated mm/dd/yyyy # Timestamp: Last updated mon day # # m - One or two digit month. January is "1" # mm - Always two digit month. January is "01" # mon - Short month word. January is "Jan" # month - Long month word. January is "January" # d - One or two digit day. 1 is "1" # dd - Always two digit day. 1 is "01" # day - Day with letter extension. 1 is "1st" # yy - Two digit year. 2006 is "06" # yyyy - Four digit year. 2006 is "2006" # year - Four digit year. 2006 is "2006" # -------------------------------------------------------------------------- # # Cut and paste the lines below to change the order in which your files # appear on the menu. Don't worry about adding or removing files, Natural # Docs will take care of that. # # You can further organize the menu by grouping the entries. Add a # "Group: [name] {" line to start a group, and add a "}" to end it. # # You can add text and web links to the menu by adding "Text: [text]" and # "Link: [name] ([URL])" lines, respectively. # # The formatting and comments are auto-generated, so don't worry about # neatness when editing the file. Natural Docs will clean it up the next # time it is run. 
When working with groups, just deal with the braces and # forget about the indentation and comments. # # -------------------------------------------------------------------------- File: Functions (functions.sql) Group: Index { Index: Everything Function Index: Functions } # Group: Index skytools-3.2.6/sql/pgq_coop/structure/0000755000000000000000000000000012426435645014760 5ustar skytools-3.2.6/sql/pgq_coop/structure/ext_postproc.sql0000644000000000000000000000000012426435645020220 0ustar skytools-3.2.6/sql/pgq_coop/structure/ext_unpackaged.sql0000644000000000000000000000006012426435645020457 0ustar ALTER EXTENSION pgq_coop ADD SCHEMA pgq_coop; skytools-3.2.6/sql/pgq_coop/structure/upgrade.sql0000644000000000000000000000003312426435645017124 0ustar \i structure/functions.sql skytools-3.2.6/sql/pgq_coop/structure/grants.ini0000644000000000000000000000113612426435645016760 0ustar [GrantFu] roles = pgq_reader, pgq_writer, pgq_admin, public [1.consumer] on.functions = %(pgq_coop_fns)s pgq_reader = execute [2.public] on.functions = pgq_coop.version() public = execute [DEFAULT] pgq_coop_fns = pgq_coop.register_subconsumer(text, text, text), pgq_coop.unregister_subconsumer(text, text, text, integer), pgq_coop.next_batch(text, text, text), pgq_coop.next_batch(text, text, text, interval), pgq_coop.next_batch_custom(text, text, text, interval, int4, interval), pgq_coop.next_batch_custom(text, text, text, interval, int4, interval, interval), pgq_coop.finish_batch(bigint) skytools-3.2.6/sql/pgq_coop/structure/functions.sql0000644000000000000000000000157312426435645017517 0ustar -- ---------------------------------------------------------------------- -- Section: Functions -- -- Overview: -- -- The usual flow of a cooperative consumer is to -- -- 1. register itself as a subconsumer for a queue: -- pgq_coop.register_subconsumer() -- -- And the run a loop doing -- -- 2A. pgq_coop.next_batch () -- -- 2B. 
pgq_coop.finish_batch() -- -- Once the cooperative (or sub-)consuber is done, it should unregister -- itself before exiting -- -- 3. pgq_coop.unregister_subconsumer() -- -- -- ---------------------------------------------------------------------- -- Group: Subconsumer registration \i functions/pgq_coop.register_subconsumer.sql \i functions/pgq_coop.unregister_subconsumer.sql -- Group: Event processing \i functions/pgq_coop.next_batch.sql \i functions/pgq_coop.finish_batch.sql -- Group: General Info \i functions/pgq_coop.version.sql skytools-3.2.6/sql/pgq_coop/structure/grants.sql0000644000000000000000000000005412426435645016776 0ustar GRANT usage ON SCHEMA pgq_coop TO public; skytools-3.2.6/sql/pgq_coop/structure/schema.sql0000644000000000000000000000003212426435645016734 0ustar create schema pgq_coop; skytools-3.2.6/sql/pgq_coop/structure/install.sql0000644000000000000000000000011312426435645017142 0ustar \i structure/schema.sql \i structure/functions.sql \i structure/grants.sql skytools-3.2.6/sql/pgq_ext/0000755000000000000000000000000012426435645012560 5ustar skytools-3.2.6/sql/pgq_ext/pgq_ext.control0000644000000000000000000000022412426435645015627 0ustar # pgq_ext comment = 'Target-side batch tracking infrastructure' default_version = '3.1' relocatable = false superuser = true schema = 'pg_catalog' skytools-3.2.6/sql/pgq_ext/Makefile0000644000000000000000000000064112426435645014221 0ustar EXTENSION = pgq_ext EXT_VERSION = 3.1 EXT_OLD_VERSIONS = Contrib_regress = init_noext test_pgq_ext test_upgrade Extension_regress = init_ext test_pgq_ext DOCS = README.pgq_ext include ../common-pgxs.mk dox: cleandox $(SRCS) mkdir -p docs/html mkdir -p docs/sql $(CATSQL) --ndoc structure/tables.sql > docs/sql/schema.sql $(CATSQL) --ndoc structure/upgrade.sql > docs/sql/functions.sql $(NDOC) $(NDOCARGS) skytools-3.2.6/sql/pgq_ext/functions/0000755000000000000000000000000012426435645014570 5ustar 
skytools-3.2.6/sql/pgq_ext/functions/pgq_ext.set_batch_done.sql0000644000000000000000000000376712426435645021735 0ustar create or replace function pgq_ext.set_batch_done( a_consumer text, a_subconsumer text, a_batch_id bigint) returns boolean as $$ -- ---------------------------------------------------------------------- -- Function: pgq_ext.set_batch_done(3) -- -- Marks a batch as "done" for certain consumer and subconsumer -- -- Parameters: -- a_consumer - consumer name -- a_subconsumer - subconsumer name -- a_batch_id - a batch id -- -- Returns: -- false if it already was done -- true for successfully marking it as done -- Calls: -- None -- Tables directly manipulated: -- update - pgq_ext.completed_batch -- ---------------------------------------------------------------------- begin if pgq_ext.is_batch_done(a_consumer, a_subconsumer, a_batch_id) then return false; end if; if a_batch_id > 0 then update pgq_ext.completed_batch set last_batch_id = a_batch_id where consumer_id = a_consumer and subconsumer_id = a_subconsumer; if not found then insert into pgq_ext.completed_batch (consumer_id, subconsumer_id, last_batch_id) values (a_consumer, a_subconsumer, a_batch_id); end if; end if; return true; end; $$ language plpgsql security definer; create or replace function pgq_ext.set_batch_done( a_consumer text, a_batch_id bigint) returns boolean as $$ -- ---------------------------------------------------------------------- -- Function: pgq_ext.set_batch_done(3) -- -- Marks a batch as "done" for certain consumer -- -- Parameters: -- a_consumer - consumer name -- a_batch_id - a batch id -- -- Returns: -- false if it already was done -- true for successfully marking it as done -- Calls: -- pgq_ext.set_batch_done(3) -- Tables directly manipulated: -- None -- ---------------------------------------------------------------------- begin return pgq_ext.set_batch_done(a_consumer, '', a_batch_id); end; $$ language plpgsql; 
skytools-3.2.6/sql/pgq_ext/functions/pgq_ext.set_last_tick.sql0000644000000000000000000000401012426435645021602 0ustar create or replace function pgq_ext.set_last_tick( a_consumer text, a_subconsumer text, a_tick_id bigint) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq_ext.set_last_tick(3) -- -- records last completed tick for consumer, -- removes row if a_tick_id is NULL -- -- Parameters: -- a_consumer - consumer name -- a_subconsumer - subconsumer name -- a_tick_id - a tick id -- -- Returns: -- 1 -- Calls: -- None -- Tables directly manipulated: -- delete - pgq_ext.completed_tick -- update - pgq_ext.completed_tick -- insert - pgq_ext.completed_tick -- ---------------------------------------------------------------------- begin if a_tick_id is null then delete from pgq_ext.completed_tick where consumer_id = a_consumer and subconsumer_id = a_subconsumer; else update pgq_ext.completed_tick set last_tick_id = a_tick_id where consumer_id = a_consumer and subconsumer_id = a_subconsumer; if not found then insert into pgq_ext.completed_tick (consumer_id, subconsumer_id, last_tick_id) values (a_consumer, a_subconsumer, a_tick_id); end if; end if; return 1; end; $$ language plpgsql security definer; create or replace function pgq_ext.set_last_tick( a_consumer text, a_tick_id bigint) returns integer as $$ -- ---------------------------------------------------------------------- -- Function: pgq_ext.set_last_tick(2) -- -- records last completed tick for consumer, -- removes row if a_tick_id is NULL -- -- Parameters: -- a_consumer - consumer name -- a_tick_id - a tick id -- -- Returns: -- 1 -- Calls: -- pgq_ext.set_last_tick(2) -- Tables directly manipulated: -- None -- ---------------------------------------------------------------------- begin return pgq_ext.set_last_tick(a_consumer, '', a_tick_id); end; $$ language plpgsql; 
skytools-3.2.6/sql/pgq_ext/functions/pgq_ext.set_event_done.sql0000644000000000000000000000614112426435645021762 0ustar create or replace function pgq_ext.set_event_done( a_consumer text, a_subconsumer text, a_batch_id bigint, a_event_id bigint) returns boolean as $$ -- ---------------------------------------------------------------------- -- Function: pgq_ext.set_event_done(4) -- -- Marks and event done in a batch for a certain consumer and subconsumer -- -- Parameters: -- a_consumer - consumer name -- a_subconsumer - subconsumer name -- a_batch_id - a batch id -- a_event_id - an event id -- -- Returns: -- false if already done -- true on success -- Calls: -- None -- Tables directly manipulated: -- insert - pgq_ext.partial_batch -- delete - pgq_ext.completed_event -- update - pgq_ext.partial_batch -- insert - pgq_ext.completed_event -- ---------------------------------------------------------------------- declare old_batch bigint; begin -- check if done perform 1 from pgq_ext.completed_event where consumer_id = a_consumer and subconsumer_id = a_subconsumer and batch_id = a_batch_id and event_id = a_event_id; if found then return false; end if; -- if batch changed, do cleanup select cur_batch_id into old_batch from pgq_ext.partial_batch where consumer_id = a_consumer and subconsumer_id = a_subconsumer; if not found then -- first time here insert into pgq_ext.partial_batch (consumer_id, subconsumer_id, cur_batch_id) values (a_consumer, a_subconsumer, a_batch_id); elsif old_batch <> a_batch_id then -- batch changed, that means old is finished on queue db -- thus the tagged events are not needed anymore delete from pgq_ext.completed_event where consumer_id = a_consumer and subconsumer_id = a_subconsumer and batch_id = old_batch; -- remember current one update pgq_ext.partial_batch set cur_batch_id = a_batch_id where consumer_id = a_consumer and subconsumer_id = a_subconsumer; end if; -- tag as done insert into pgq_ext.completed_event (consumer_id, subconsumer_id, 
batch_id, event_id) values (a_consumer, a_subconsumer, a_batch_id, a_event_id); return true; end; $$ language plpgsql security definer; create or replace function pgq_ext.set_event_done( a_consumer text, a_batch_id bigint, a_event_id bigint) returns boolean as $$ -- ---------------------------------------------------------------------- -- Function: pgq_ext.set_event_done(3) -- -- Marks and event done in a batch for a certain consumer and subconsumer -- -- Parameters: -- a_consumer - consumer name -- a_batch_id - a batch id -- a_event_id - an event id -- -- Returns: -- false if already done -- true on success -- Calls: -- pgq_ext.set_event_done(4) -- Tables directly manipulated: -- None -- ---------------------------------------------------------------------- begin return pgq_ext.set_event_done(a_consumer, '', a_batch_id, a_event_id); end; $$ language plpgsql; skytools-3.2.6/sql/pgq_ext/functions/pgq_ext.get_last_tick.sql0000644000000000000000000000255712426435645021604 0ustar create or replace function pgq_ext.get_last_tick(a_consumer text, a_subconsumer text) returns int8 as $$ -- ---------------------------------------------------------------------- -- Function: pgq_ext.get_last_tick(2) -- -- Gets last completed tick for this consumer -- -- Parameters: -- a_consumer - consumer name -- a_subconsumer - subconsumer name -- -- Returns: -- tick_id - last completed tick -- Calls: -- None -- Tables directly manipulated: -- None -- ---------------------------------------------------------------------- declare res int8; begin select last_tick_id into res from pgq_ext.completed_tick where consumer_id = a_consumer and subconsumer_id = a_subconsumer; return res; end; $$ language plpgsql security definer; create or replace function pgq_ext.get_last_tick(a_consumer text) returns int8 as $$ -- ---------------------------------------------------------------------- -- Function: pgq_ext.get_last_tick(1) -- -- Gets last completed tick for this consumer -- -- Parameters: -- 
a_consumer - consumer name -- -- Returns: -- tick_id - last completed tick -- Calls: -- pgq_ext.get_last_tick(2) -- Tables directly manipulated: -- None -- ---------------------------------------------------------------------- begin return pgq_ext.get_last_tick(a_consumer, ''); end; $$ language plpgsql; skytools-3.2.6/sql/pgq_ext/functions/pgq_ext.is_batch_done.sql0000644000000000000000000000317412426435645021545 0ustar create or replace function pgq_ext.is_batch_done( a_consumer text, a_subconsumer text, a_batch_id bigint) returns boolean as $$ -- ---------------------------------------------------------------------- -- Function: pgq_ext.is_batch_done(3) -- -- Checks if a certain consumer and subconsumer have completed the batch -- -- Parameters: -- a_consumer - consumer name -- a_subconsumer - subconsumer name -- a_batch_id - a batch id -- -- Returns: -- true if batch is done, else false -- Calls: -- None -- Tables directly manipulated: -- None -- ---------------------------------------------------------------------- declare res boolean; begin select last_batch_id = a_batch_id into res from pgq_ext.completed_batch where consumer_id = a_consumer and subconsumer_id = a_subconsumer; if not found then return false; end if; return res; end; $$ language plpgsql security definer; create or replace function pgq_ext.is_batch_done( a_consumer text, a_batch_id bigint) returns boolean as $$ -- ---------------------------------------------------------------------- -- Function: pgq_ext.is_batch_done(2) -- -- Checks if a certain consumer has completed the batch -- -- Parameters: -- a_consumer - consumer name -- a_batch_id - a batch id -- -- Returns: -- true if batch is done, else false -- Calls: -- pgq_ext.is_batch_done(3) -- Tables directly manipulated: -- None -- ---------------------------------------------------------------------- begin return pgq_ext.is_batch_done(a_consumer, '', a_batch_id); end; $$ language plpgsql; 
skytools-3.2.6/sql/pgq_ext/functions/pgq_ext.version.sql0000644000000000000000000000065112426435645020446 0ustar create or replace function pgq_ext.version() returns text as $$ -- ---------------------------------------------------------------------- -- Function: pgq_ext.version(0) -- -- Returns version string for pgq_ext. ATM it is based SkyTools version -- only bumped when database code changes. -- ---------------------------------------------------------------------- begin return '3.1'; end; $$ language plpgsql; skytools-3.2.6/sql/pgq_ext/functions/pgq_ext.is_event_done.sql0000644000000000000000000000340112426435645021576 0ustar create or replace function pgq_ext.is_event_done( a_consumer text, a_subconsumer text, a_batch_id bigint, a_event_id bigint) returns boolean as $$ -- ---------------------------------------------------------------------- -- Function: pgq_ext.is_event_done(4) -- -- Checks if a certain consumer and subconsumer have "done" and event -- in a batch -- -- Parameters: -- a_consumer - consumer name -- a_subconsumer - subconsumer name -- a_batch_id - a batch id -- a_event_id - an event id -- -- Returns: -- true if event is done, else false -- Calls: -- None -- Tables directly manipulated: -- None -- ---------------------------------------------------------------------- declare res bigint; begin perform 1 from pgq_ext.completed_event where consumer_id = a_consumer and subconsumer_id = a_subconsumer and batch_id = a_batch_id and event_id = a_event_id; return found; end; $$ language plpgsql security definer; create or replace function pgq_ext.is_event_done( a_consumer text, a_batch_id bigint, a_event_id bigint) returns boolean as $$ -- ---------------------------------------------------------------------- -- Function: pgq_ext.is_event_done(3) -- -- Checks if a certain consumer has "done" and event -- in a batch -- -- Parameters: -- a_consumer - consumer name -- a_batch_id - a batch id -- a_event_id - an event id -- -- Returns: -- true if event is 
done, else false -- Calls: -- Nonpgq_ext.is_event_done(4) -- Tables directly manipulated: -- None -- ---------------------------------------------------------------------- begin return pgq_ext.is_event_done(a_consumer, '', a_batch_id, a_event_id); end; $$ language plpgsql; skytools-3.2.6/sql/pgq_ext/functions/pgq_ext.upgrade_schema.sql0000644000000000000000000001050712426435645021731 0ustar create or replace function pgq_ext.upgrade_schema() returns int4 as $$ -- updates table structure if necessary -- ---------------------------------------------------------------------- -- Function: pgq_ext.upgrade_schema() -- -- Upgrades tables to have column subconsumer_id -- -- Parameters: -- None -- -- Returns: -- number of tables updated -- Calls: -- None -- Tables directly manipulated: -- alter - pgq_ext.completed_batch -- alter - pgq_ext.completed_tick -- alter - pgq_ext.partial_batch -- alter - pgq_ext.completed_event -- ---------------------------------------------------------------------- declare cnt int4 = 0; tbl text; sql text; begin -- pgq_ext.completed_batch: subconsumer_id perform 1 from information_schema.columns where table_schema = 'pgq_ext' and table_name = 'completed_batch' and column_name = 'subconsumer_id'; if not found then alter table pgq_ext.completed_batch add column subconsumer_id text; update pgq_ext.completed_batch set subconsumer_id = ''; alter table pgq_ext.completed_batch alter column subconsumer_id set not null; alter table pgq_ext.completed_batch drop constraint completed_batch_pkey; alter table pgq_ext.completed_batch add constraint completed_batch_pkey primary key (consumer_id, subconsumer_id); cnt := cnt + 1; end if; -- pgq_ext.completed_tick: subconsumer_id perform 1 from information_schema.columns where table_schema = 'pgq_ext' and table_name = 'completed_tick' and column_name = 'subconsumer_id'; if not found then alter table pgq_ext.completed_tick add column subconsumer_id text; update pgq_ext.completed_tick set subconsumer_id = ''; alter 
table pgq_ext.completed_tick alter column subconsumer_id set not null; alter table pgq_ext.completed_tick drop constraint completed_tick_pkey; alter table pgq_ext.completed_tick add constraint completed_tick_pkey primary key (consumer_id, subconsumer_id); cnt := cnt + 1; end if; -- pgq_ext.partial_batch: subconsumer_id perform 1 from information_schema.columns where table_schema = 'pgq_ext' and table_name = 'partial_batch' and column_name = 'subconsumer_id'; if not found then alter table pgq_ext.partial_batch add column subconsumer_id text; update pgq_ext.partial_batch set subconsumer_id = ''; alter table pgq_ext.partial_batch alter column subconsumer_id set not null; alter table pgq_ext.partial_batch drop constraint partial_batch_pkey; alter table pgq_ext.partial_batch add constraint partial_batch_pkey primary key (consumer_id, subconsumer_id); cnt := cnt + 1; end if; -- pgq_ext.completed_event: subconsumer_id perform 1 from information_schema.columns where table_schema = 'pgq_ext' and table_name = 'completed_event' and column_name = 'subconsumer_id'; if not found then alter table pgq_ext.completed_event add column subconsumer_id text; update pgq_ext.completed_event set subconsumer_id = ''; alter table pgq_ext.completed_event alter column subconsumer_id set not null; alter table pgq_ext.completed_event drop constraint completed_event_pkey; alter table pgq_ext.completed_event add constraint completed_event_pkey primary key (consumer_id, subconsumer_id, batch_id, event_id); cnt := cnt + 1; end if; -- add default value to subconsumer_id column for tbl in select table_name from information_schema.columns where table_schema = 'pgq_ext' and table_name in ('completed_tick', 'completed_event', 'partial_batch', 'completed_batch') and column_name = 'subconsumer_id' and column_default is null loop sql := 'alter table pgq_ext.' 
|| tbl || ' alter column subconsumer_id set default ' || quote_literal(''); execute sql; cnt := cnt + 1; end loop; return cnt; end; $$ language plpgsql; skytools-3.2.6/sql/pgq_ext/sql/0000755000000000000000000000000012426435645013357 5ustar skytools-3.2.6/sql/pgq_ext/sql/test_upgrade.sql0000644000000000000000000000213512426435645016567 0ustar \set ECHO off set log_error_verbosity = 'terse'; set client_min_messages = 'fatal'; create language plpgsql; set client_min_messages = 'warning'; drop schema pgq_ext cascade; \i sql/old_ext.sql \i structure/upgrade.sql \set ECHO all -- -- test batch tracking -- select pgq_ext.is_batch_done('c', 1); select pgq_ext.set_batch_done('c', 1); select pgq_ext.is_batch_done('c', 1); select pgq_ext.set_batch_done('c', 1); select pgq_ext.is_batch_done('c', 2); select pgq_ext.set_batch_done('c', 2); -- -- test event tracking -- select pgq_ext.is_batch_done('c', 3); select pgq_ext.is_event_done('c', 3, 101); select pgq_ext.set_event_done('c', 3, 101); select pgq_ext.is_event_done('c', 3, 101); select pgq_ext.set_event_done('c', 3, 101); select pgq_ext.set_batch_done('c', 3); select * from pgq_ext.completed_event order by 1,2; -- -- test tick tracking -- select pgq_ext.get_last_tick('c'); select pgq_ext.set_last_tick('c', 1); select pgq_ext.get_last_tick('c'); select pgq_ext.set_last_tick('c', 2); select pgq_ext.get_last_tick('c'); select pgq_ext.set_last_tick('c', NULL); select pgq_ext.get_last_tick('c'); skytools-3.2.6/sql/pgq_ext/sql/old_ext.sql0000644000000000000000000001102712426435645015537 0ustar set client_min_messages = 'warning'; set default_with_oids = 'off'; create schema pgq_ext; grant usage on schema pgq_ext to public; -- -- batch tracking -- create table pgq_ext.completed_batch ( consumer_id text not null, last_batch_id bigint not null, primary key (consumer_id) ); -- -- event tracking -- create table pgq_ext.completed_event ( consumer_id text not null, batch_id bigint not null, event_id bigint not null, primary key 
(consumer_id, batch_id, event_id) ); create table pgq_ext.partial_batch ( consumer_id text not null, cur_batch_id bigint not null, primary key (consumer_id) ); -- -- tick tracking for SerialConsumer() -- no access functions provided here -- create table pgq_ext.completed_tick ( consumer_id text not null, last_tick_id bigint not null, primary key (consumer_id) ); create or replace function pgq_ext.is_batch_done( a_consumer text, a_batch_id bigint) returns boolean as $$ declare res boolean; begin select last_batch_id = a_batch_id into res from pgq_ext.completed_batch where consumer_id = a_consumer; if not found then return false; end if; return res; end; $$ language plpgsql security definer; create or replace function pgq_ext.set_batch_done( a_consumer text, a_batch_id bigint) returns boolean as $$ begin if pgq_ext.is_batch_done(a_consumer, a_batch_id) then return false; end if; if a_batch_id > 0 then update pgq_ext.completed_batch set last_batch_id = a_batch_id where consumer_id = a_consumer; if not found then insert into pgq_ext.completed_batch (consumer_id, last_batch_id) values (a_consumer, a_batch_id); end if; end if; return true; end; $$ language plpgsql security definer; create or replace function pgq_ext.is_event_done( a_consumer text, a_batch_id bigint, a_event_id bigint) returns boolean as $$ declare res bigint; begin perform 1 from pgq_ext.completed_event where consumer_id = a_consumer and batch_id = a_batch_id and event_id = a_event_id; return found; end; $$ language plpgsql security definer; create or replace function pgq_ext.set_event_done( a_consumer text, a_batch_id bigint, a_event_id bigint) returns boolean as $$ declare old_batch bigint; begin -- check if done perform 1 from pgq_ext.completed_event where consumer_id = a_consumer and batch_id = a_batch_id and event_id = a_event_id; if found then return false; end if; -- if batch changed, do cleanup select cur_batch_id into old_batch from pgq_ext.partial_batch where consumer_id = a_consumer; if not 
found then -- first time here insert into pgq_ext.partial_batch (consumer_id, cur_batch_id) values (a_consumer, a_batch_id); elsif old_batch <> a_batch_id then -- batch changed, that means old is finished on queue db -- thus the tagged events are not needed anymore delete from pgq_ext.completed_event where consumer_id = a_consumer and batch_id = old_batch; -- remember current one update pgq_ext.partial_batch set cur_batch_id = a_batch_id where consumer_id = a_consumer; end if; -- tag as done insert into pgq_ext.completed_event (consumer_id, batch_id, event_id) values (a_consumer, a_batch_id, a_event_id); return true; end; $$ language plpgsql security definer; create or replace function pgq_ext.get_last_tick(a_consumer text) returns int8 as $$ declare res int8; begin select last_tick_id into res from pgq_ext.completed_tick where consumer_id = a_consumer; return res; end; $$ language plpgsql security definer; create or replace function pgq_ext.set_last_tick(a_consumer text, a_tick_id bigint) returns integer as $$ begin if a_tick_id is null then delete from pgq_ext.completed_tick where consumer_id = a_consumer; else update pgq_ext.completed_tick set last_tick_id = a_tick_id where consumer_id = a_consumer; if not found then insert into pgq_ext.completed_tick (consumer_id, last_tick_id) values (a_consumer, a_tick_id); end if; end if; return 1; end; $$ language plpgsql security definer; create or replace function pgq_ext.version() returns text as $$ begin return '3.0.0.1'; end; $$ language plpgsql; skytools-3.2.6/sql/pgq_ext/sql/test_pgq_ext.sql0000644000000000000000000000156012426435645016610 0ustar -- -- test batch tracking -- select pgq_ext.is_batch_done('c', 1); select pgq_ext.set_batch_done('c', 1); select pgq_ext.is_batch_done('c', 1); select pgq_ext.set_batch_done('c', 1); select pgq_ext.is_batch_done('c', 2); select pgq_ext.set_batch_done('c', 2); -- -- test event tracking -- select pgq_ext.is_batch_done('c', 3); select pgq_ext.is_event_done('c', 3, 101); select 
pgq_ext.set_event_done('c', 3, 101); select pgq_ext.is_event_done('c', 3, 101); select pgq_ext.set_event_done('c', 3, 101); select pgq_ext.set_batch_done('c', 3); select * from pgq_ext.completed_event order by 1,2; -- -- test tick tracking -- select pgq_ext.get_last_tick('c'); select pgq_ext.set_last_tick('c', 1); select pgq_ext.get_last_tick('c'); select pgq_ext.set_last_tick('c', 2); select pgq_ext.get_last_tick('c'); select pgq_ext.set_last_tick('c', NULL); select pgq_ext.get_last_tick('c'); skytools-3.2.6/sql/pgq_ext/sql/init_noext.sql0000644000000000000000000000005012426435645016253 0ustar \set ECHO off \i structure/install.sql skytools-3.2.6/sql/pgq_ext/sql/init_ext.sql0000644000000000000000000000054212426435645015724 0ustar \set ECHO off \i structure/install.sql \set ECHO all create extension pgq_ext from 'unpackaged'; select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'pgq_ext'; drop extension pgq_ext; create extension pgq_ext; select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'pgq_ext'; skytools-3.2.6/sql/pgq_ext/README.pgq_ext0000644000000000000000000000150612426435645015110 0ustar Track processed batches and events in target DB ================================================ Batch tracking is OK. Event tracking is OK if consumer does not use retry queue. 
Batch tracking -------------- is_batch_done(consumer, batch) returns: true - batch is done already false - batch is not done yet set_batch_done(consumer, batch) returns: true - tagging successful, batch was not done yet false - batch was done already Event tracking -------------- is_batch_done(consumer, batch, event) returns: true - event is done false - event is not done yet set_batch_done(consumer, batch, event) returns: true - tagging was successful, event was not done false - event is done already Fastvacuum ---------- pgq.ext.completed_batch pgq.ext.completed_event pgq.ext.completed_tick pgq.ext.partial_batch skytools-3.2.6/sql/pgq_ext/expected/0000755000000000000000000000000012426435645014361 5ustar skytools-3.2.6/sql/pgq_ext/expected/test_pgq_ext.out0000644000000000000000000000367012426435645017626 0ustar -- -- test batch tracking -- select pgq_ext.is_batch_done('c', 1); is_batch_done --------------- f (1 row) select pgq_ext.set_batch_done('c', 1); set_batch_done ---------------- t (1 row) select pgq_ext.is_batch_done('c', 1); is_batch_done --------------- t (1 row) select pgq_ext.set_batch_done('c', 1); set_batch_done ---------------- f (1 row) select pgq_ext.is_batch_done('c', 2); is_batch_done --------------- f (1 row) select pgq_ext.set_batch_done('c', 2); set_batch_done ---------------- t (1 row) -- -- test event tracking -- select pgq_ext.is_batch_done('c', 3); is_batch_done --------------- f (1 row) select pgq_ext.is_event_done('c', 3, 101); is_event_done --------------- f (1 row) select pgq_ext.set_event_done('c', 3, 101); set_event_done ---------------- t (1 row) select pgq_ext.is_event_done('c', 3, 101); is_event_done --------------- t (1 row) select pgq_ext.set_event_done('c', 3, 101); set_event_done ---------------- f (1 row) select pgq_ext.set_batch_done('c', 3); set_batch_done ---------------- t (1 row) select * from pgq_ext.completed_event order by 1,2; consumer_id | subconsumer_id | batch_id | event_id 
-------------+----------------+----------+---------- c | | 3 | 101 (1 row) -- -- test tick tracking -- select pgq_ext.get_last_tick('c'); get_last_tick --------------- (1 row) select pgq_ext.set_last_tick('c', 1); set_last_tick --------------- 1 (1 row) select pgq_ext.get_last_tick('c'); get_last_tick --------------- 1 (1 row) select pgq_ext.set_last_tick('c', 2); set_last_tick --------------- 1 (1 row) select pgq_ext.get_last_tick('c'); get_last_tick --------------- 2 (1 row) select pgq_ext.set_last_tick('c', NULL); set_last_tick --------------- 1 (1 row) select pgq_ext.get_last_tick('c'); get_last_tick --------------- (1 row) skytools-3.2.6/sql/pgq_ext/expected/init_noext.out0000644000000000000000000000011112426435645017263 0ustar \set ECHO off upgrade_schema ---------------- 4 (1 row) skytools-3.2.6/sql/pgq_ext/expected/test_upgrade.out0000644000000000000000000000376312426435645017611 0ustar \set ECHO off upgrade_schema ---------------- 8 (1 row) -- -- test batch tracking -- select pgq_ext.is_batch_done('c', 1); is_batch_done --------------- f (1 row) select pgq_ext.set_batch_done('c', 1); set_batch_done ---------------- t (1 row) select pgq_ext.is_batch_done('c', 1); is_batch_done --------------- t (1 row) select pgq_ext.set_batch_done('c', 1); set_batch_done ---------------- f (1 row) select pgq_ext.is_batch_done('c', 2); is_batch_done --------------- f (1 row) select pgq_ext.set_batch_done('c', 2); set_batch_done ---------------- t (1 row) -- -- test event tracking -- select pgq_ext.is_batch_done('c', 3); is_batch_done --------------- f (1 row) select pgq_ext.is_event_done('c', 3, 101); is_event_done --------------- f (1 row) select pgq_ext.set_event_done('c', 3, 101); set_event_done ---------------- t (1 row) select pgq_ext.is_event_done('c', 3, 101); is_event_done --------------- t (1 row) select pgq_ext.set_event_done('c', 3, 101); set_event_done ---------------- f (1 row) select pgq_ext.set_batch_done('c', 3); set_batch_done ---------------- t (1 row) 
select * from pgq_ext.completed_event order by 1,2; consumer_id | batch_id | event_id | subconsumer_id -------------+----------+----------+---------------- c | 3 | 101 | (1 row) -- -- test tick tracking -- select pgq_ext.get_last_tick('c'); get_last_tick --------------- (1 row) select pgq_ext.set_last_tick('c', 1); set_last_tick --------------- 1 (1 row) select pgq_ext.get_last_tick('c'); get_last_tick --------------- 1 (1 row) select pgq_ext.set_last_tick('c', 2); set_last_tick --------------- 1 (1 row) select pgq_ext.get_last_tick('c'); get_last_tick --------------- 2 (1 row) select pgq_ext.set_last_tick('c', NULL); set_last_tick --------------- 1 (1 row) select pgq_ext.get_last_tick('c'); get_last_tick --------------- (1 row) skytools-3.2.6/sql/pgq_ext/expected/init_ext.out0000644000000000000000000000070512426435645016737 0ustar \set ECHO off upgrade_schema ---------------- 4 (1 row) create extension pgq_ext from 'unpackaged'; select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'pgq_ext'; dumpable ---------- 4 (1 row) drop extension pgq_ext; create extension pgq_ext; select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'pgq_ext'; dumpable ---------- 4 (1 row) skytools-3.2.6/sql/pgq_ext/docs/0000755000000000000000000000000012426435645013510 5ustar skytools-3.2.6/sql/pgq_ext/docs/Topics.txt0000644000000000000000000000650512426435645015520 0ustar Format: 1.52 # This is the Natural Docs topics file for this project. If you change anything # here, it will apply to THIS PROJECT ONLY. If you'd like to change something # for all your projects, edit the Topics.txt in Natural Docs' Config directory # instead. # If you'd like to prevent keywords from being recognized by Natural Docs, you # can do it like this: # Ignore Keywords: [keyword], [keyword], ... # # Or you can use the list syntax like how they are defined: # Ignore Keywords: # [keyword] # [keyword], [plural keyword] # ... 
#------------------------------------------------------------------------------- # SYNTAX: # # Topic Type: [name] # Alter Topic Type: [name] # Creates a new topic type or alters one from the main file. Each type gets # its own index and behavior settings. Its name can have letters, numbers, # spaces, and these charaters: - / . ' # # Plural: [name] # Sets the plural name of the topic type, if different. # # Keywords: # [keyword] # [keyword], [plural keyword] # ... # Defines or adds to the list of keywords for the topic type. They may only # contain letters, numbers, and spaces and are not case sensitive. Plural # keywords are used for list topics. You can redefine keywords found in the # main topics file. # # Index: [yes|no] # Whether the topics get their own index. Defaults to yes. Everything is # included in the general index regardless of this setting. # # Scope: [normal|start|end|always global] # How the topics affects scope. Defaults to normal. # normal - Topics stay within the current scope. # start - Topics start a new scope for all the topics beneath it, # like class topics. # end - Topics reset the scope back to global for all the topics # beneath it. # always global - Topics are defined as global, but do not change the scope # for any other topics. # # Class Hierarchy: [yes|no] # Whether the topics are part of the class hierarchy. Defaults to no. # # Page Title If First: [yes|no] # Whether the topic's title becomes the page title if it's the first one in # a file. Defaults to no. # # Break Lists: [yes|no] # Whether list topics should be broken into individual topics in the output. # Defaults to no. # # Can Group With: [type], [type], ... # Defines a list of topic types that this one can possibly be grouped with. # Defaults to none. 
#------------------------------------------------------------------------------- # The following topics are defined in the main file, if you'd like to alter # their behavior or add keywords: # # Generic, Class, Interface, Section, File, Group, Function, Variable, # Property, Type, Constant, Enumeration, Event, Delegate, Macro, # Database, Database Table, Database View, Database Index, Database # Cursor, Database Trigger, Cookie, Build Target # If you add something that you think would be useful to other developers # and should be included in Natural Docs by default, please e-mail it to # topics [at] naturaldocs [dot] org. Topic Type: Schema Plural: Schemas Index: No Scope: Start Class Hierarchy: Yes Keywords: schema, schemas Alter Topic Type: Function Add Keywords: public function internal function Alter Topic Type: File Index: No skytools-3.2.6/sql/pgq_ext/docs/Languages.txt0000644000000000000000000001202112426435645016153 0ustar Format: 1.52 # This is the Natural Docs languages file for this project. If you change # anything here, it will apply to THIS PROJECT ONLY. If you'd like to change # something for all your projects, edit the Languages.txt in Natural Docs' # Config directory instead. Ignore Extension: sql #------------------------------------------------------------------------------- # SYNTAX: # # Unlike other Natural Docs configuration files, in this file all comments # MUST be alone on a line. Some languages deal with the # character, so you # cannot put comments on the same line as content. # # Also, all lists are separated with spaces, not commas, again because some # languages may need to use them. # # Language: [name] # Alter Language: [name] # Defines a new language or alters an existing one. Its name can use any # characters. If any of the properties below have an add/replace form, you # must use that when using Alter Language. # # The language Shebang Script is special. 
It's entry is only used for # extensions, and files with those extensions have their shebang (#!) lines # read to determine the real language of the file. Extensionless files are # always treated this way. # # The language Text File is also special. It's treated as one big comment # so you can put Natural Docs content in them without special symbols. Also, # if you don't specify a package separator, ignored prefixes, or enum value # behavior, it will copy those settings from the language that is used most # in the source tree. # # Extensions: [extension] [extension] ... # [Add/Replace] Extensions: [extension] [extension] ... # Defines the file extensions of the language's source files. You can # redefine extensions found in the main languages file. You can use * to # mean any undefined extension. # # Shebang Strings: [string] [string] ... # [Add/Replace] Shebang Strings: [string] [string] ... # Defines a list of strings that can appear in the shebang (#!) line to # designate that it's part of the language. You can redefine strings found # in the main languages file. # # Ignore Prefixes in Index: [prefix] [prefix] ... # [Add/Replace] Ignored Prefixes in Index: [prefix] [prefix] ... # # Ignore [Topic Type] Prefixes in Index: [prefix] [prefix] ... # [Add/Replace] Ignored [Topic Type] Prefixes in Index: [prefix] [prefix] ... # Specifies prefixes that should be ignored when sorting symbols in an # index. Can be specified in general or for a specific topic type. # #------------------------------------------------------------------------------ # For basic language support only: # # Line Comments: [symbol] [symbol] ... # Defines a space-separated list of symbols that are used for line comments, # if any. # # Block Comments: [opening sym] [closing sym] [opening sym] [closing sym] ... # Defines a space-separated list of symbol pairs that are used for block # comments, if any. # # Package Separator: [symbol] # Defines the default package separator symbol. 
The default is a dot. # # [Topic Type] Prototype Enders: [symbol] [symbol] ... # When defined, Natural Docs will attempt to get a prototype from the code # immediately following the topic type. It stops when it reaches one of # these symbols. Use \n for line breaks. # # Line Extender: [symbol] # Defines the symbol that allows a prototype to span multiple lines if # normally a line break would end it. # # Enum Values: [global|under type|under parent] # Defines how enum values are referenced. The default is global. # global - Values are always global, referenced as 'value'. # under type - Values are under the enum type, referenced as # 'package.enum.value'. # under parent - Values are under the enum's parent, referenced as # 'package.value'. # # Perl Package: [perl package] # Specifies the Perl package used to fine-tune the language behavior in ways # too complex to do in this file. # #------------------------------------------------------------------------------ # For full language support only: # # Full Language Support: [perl package] # Specifies the Perl package that has the parsing routines necessary for full # language support. # #------------------------------------------------------------------------------- # The following languages are defined in the main file, if you'd like to alter # them: # # Text File, Shebang Script, C/C++, C#, Java, JavaScript, Perl, Python, # PHP, SQL, Visual Basic, Pascal, Assembly, Ada, Tcl, Ruby, Makefile, # ActionScript, ColdFusion, R, Fortran # If you add a language that you think would be useful to other developers # and should be included in Natural Docs by default, please e-mail it to # languages [at] naturaldocs [dot] org. 
Language: PLPGSQL Extension: sql Line Comment: -- Block Comment: /* */ Enum Values: Global Function Prototype Enders: , ; ) $ ' Variable Prototype Enders: , ; ) := default Default DEFAULT Database Index Prototype Enders: , ; ) Database Trigger Prototype Enders: begin Begin BEGIN skytools-3.2.6/sql/pgq_ext/docs/Menu.txt0000644000000000000000000000370412426435645015161 0ustar Format: 1.52 # You can add a title and sub-title to your menu like this: # Title: [project name] # SubTitle: [subtitle] # You can add a footer to your documentation like this: # Footer: [text] # If you want to add a copyright notice, this would be the place to do it. # You can add a timestamp to your documentation like one of these: # Timestamp: Generated on month day, year # Timestamp: Updated mm/dd/yyyy # Timestamp: Last updated mon day # # m - One or two digit month. January is "1" # mm - Always two digit month. January is "01" # mon - Short month word. January is "Jan" # month - Long month word. January is "January" # d - One or two digit day. 1 is "1" # dd - Always two digit day. 1 is "01" # day - Day with letter extension. 1 is "1st" # yy - Two digit year. 2006 is "06" # yyyy - Four digit year. 2006 is "2006" # year - Four digit year. 2006 is "2006" # -------------------------------------------------------------------------- # # Cut and paste the lines below to change the order in which your files # appear on the menu. Don't worry about adding or removing files, Natural # Docs will take care of that. # # You can further organize the menu by grouping the entries. Add a # "Group: [name] {" line to start a group, and add a "}" to end it. # # You can add text and web links to the menu by adding "Text: [text]" and # "Link: [name] ([URL])" lines, respectively. # # The formatting and comments are auto-generated, so don't worry about # neatness when editing the file. Natural Docs will clean it up the next # time it is run. 
When working with groups, just deal with the braces and # forget about the indentation and comments. # # -------------------------------------------------------------------------- File: Functions (functions.sql) File: Tables (schema.sql) Group: Index { Index: Everything Database Table Index: Database Tables Function Index: Functions } # Group: Index skytools-3.2.6/sql/pgq_ext/structure/0000755000000000000000000000000012426435645014620 5ustar skytools-3.2.6/sql/pgq_ext/structure/ext_postproc.sql0000644000000000000000000000051412426435645020072 0ustar -- tag data objects as dumpable SELECT pg_catalog.pg_extension_config_dump('pgq_ext.completed_tick', ''); SELECT pg_catalog.pg_extension_config_dump('pgq_ext.completed_batch', ''); SELECT pg_catalog.pg_extension_config_dump('pgq_ext.completed_event', ''); SELECT pg_catalog.pg_extension_config_dump('pgq_ext.partial_batch', ''); skytools-3.2.6/sql/pgq_ext/structure/tables.sql0000644000000000000000000000645612426435645016626 0ustar -- ---------------------------------------------------------------------- -- Section: Tables -- -- The pgq_ext schema exists to help in making sure that all events get -- processed and they get processed only once. -- -- Simple guidelines for avoiding duplicate events: -- -- It is pretty burdensome to check if event is already processed, -- especially on bulk data moving. Here's a way how individual -- event checks can be avoided by tracking processing of batches. -- -- First, consumer must guarantee that it processes all events in one tx. -- -- Consumer itself can tag events for retry, but then -- it must be able to handle them later. -- -- Simple case: Only one db: -- -- If the PgQ queue and event data handling happen in same database, -- the consumer must simply call pgq.finish_batch() inside -- the event-processing transaction. 
-- -- Several databases: -- -- If the event processing happens in different database, the consumer -- must store the batch_id into destination database, inside the same -- transaction as the event processing happens. -- -- * Only after committing it, consumer can call pgq.finish_batch() -- in queue database and commit that. -- -- * As the batches come in sequence, there's no need to remember -- full log of batch_id's, it's enough to keep the latest batch_id. -- -- * Then at the start of every batch, consumer can check if the batch_id already -- exists in destination database, and if it does, then just tag batch done, -- without processing. -- -- With this, there's no need for consumer to check for already processed -- events. -- -- Note: -- -- This assumes the event processing is transactional and failures -- will be rollbacked. If event processing includes communication with -- world outside database, eg. sending email, such handling won't work. -- -- ---------------------------------------------------------------------- set client_min_messages = 'warning'; set default_with_oids = 'off'; create schema pgq_ext; -- -- Table: pgq_ext.completed_tick -- -- Used for tracking last completed batch tracking -- via tick_id. -- create table pgq_ext.completed_tick ( consumer_id text not null, subconsumer_id text not null, last_tick_id bigint not null, primary key (consumer_id, subconsumer_id) ); -- -- Table: pgq_ext.completed_batch -- -- Used for tracking last completed batch tracking -- create table pgq_ext.completed_batch ( consumer_id text not null, subconsumer_id text not null, last_batch_id bigint not null, primary key (consumer_id, subconsumer_id) ); -- -- Table: pgq_ext.completed_event -- -- Stored completed event in current partial batch. 
-- create table pgq_ext.completed_event ( consumer_id text not null, subconsumer_id text not null, batch_id bigint not null, event_id bigint not null, primary key (consumer_id, subconsumer_id, batch_id, event_id) ); -- -- Table: pgq_ext.partial_batch -- -- Stored current in-progress batch -- create table pgq_ext.partial_batch ( consumer_id text not null, subconsumer_id text not null, cur_batch_id bigint not null, primary key (consumer_id, subconsumer_id) ); skytools-3.2.6/sql/pgq_ext/structure/ext_unpackaged.sql0000644000000000000000000000043012426435645020320 0ustar ALTER EXTENSION pgq_ext ADD SCHEMA pgq_ext; ALTER EXTENSION pgq_ext ADD TABLE pgq_ext.completed_tick; ALTER EXTENSION pgq_ext ADD TABLE pgq_ext.completed_batch; ALTER EXTENSION pgq_ext ADD TABLE pgq_ext.completed_event; ALTER EXTENSION pgq_ext ADD TABLE pgq_ext.partial_batch; skytools-3.2.6/sql/pgq_ext/structure/upgrade.sql0000644000000000000000000000077312426435645016777 0ustar -- -- Section: Functions -- \i functions/pgq_ext.upgrade_schema.sql select pgq_ext.upgrade_schema(); -- Group: track batches via batch id \i functions/pgq_ext.is_batch_done.sql \i functions/pgq_ext.set_batch_done.sql -- Group: track batches via tick id \i functions/pgq_ext.get_last_tick.sql \i functions/pgq_ext.set_last_tick.sql -- Group: Track events separately \i functions/pgq_ext.is_event_done.sql \i functions/pgq_ext.set_event_done.sql -- Group: Schema info \i functions/pgq_ext.version.sql skytools-3.2.6/sql/pgq_ext/structure/grants.ini0000644000000000000000000000133112426435645016615 0ustar [GrantFu] roles = pgq_writer, public [1.public] on.functions = pgq_ext.version() public = execute [2.pgq_ext] on.functions = %(pgq_ext_fns)s pgq_writer = execute [DEFAULT] pgq_ext_fns = pgq_ext.upgrade_schema(), pgq_ext.is_batch_done(text, text, bigint), pgq_ext.is_batch_done(text, bigint), pgq_ext.set_batch_done(text, text, bigint), pgq_ext.set_batch_done(text, bigint), pgq_ext.is_event_done(text, text, bigint, bigint), 
pgq_ext.is_event_done(text, bigint, bigint), pgq_ext.set_event_done(text, text, bigint, bigint), pgq_ext.set_event_done(text, bigint, bigint), pgq_ext.get_last_tick(text, text), pgq_ext.get_last_tick(text), pgq_ext.set_last_tick(text, text, bigint), pgq_ext.set_last_tick(text, bigint) skytools-3.2.6/sql/pgq_ext/structure/grants.sql0000644000000000000000000000005312426435645016635 0ustar grant usage on schema pgq_ext to public; skytools-3.2.6/sql/pgq_ext/structure/install.sql0000644000000000000000000000011212426435645017001 0ustar \i structure/tables.sql \i structure/upgrade.sql \i structure/grants.sql skytools-3.2.6/sql/dispatch/0000755000000000000000000000000012426435645012710 5ustar skytools-3.2.6/sql/dispatch/create_partition.sql0000644000000000000000000001116512426435645016771 0ustar create or replace function public.create_partition( i_table text, i_part text, i_pkeys text, i_part_field text, i_part_time timestamptz, i_part_period text ) returns int as $$ ------------------------------------------------------------------------ -- Function: public.create_partition -- -- Creates child table for aggregation function for either monthly or daily if it does not exist yet. -- Locks parent table for child table creating. -- -- Parameters: -- i_table - name of parent table -- i_part - name of partition table to create -- i_pkeys - primary key fields (comma separated, used to create constraint). 
-- i_part_field - field used to partition table (when not partitioned by field, value is NULL) -- i_part_time - partition time -- i_part_period - period of partitioned data, current possible values are 'hour', 'day', 'month' and 'year' -- -- Example: -- select public.create_partition('aggregate.user_call_monthly', 'aggregate.user_call_monthly_2010_01', 'key_user', 'period_start', '2010-01-10 11:00'::timestamptz, 'month'); -- ------------------------------------------------------------------------ declare chk_start text; chk_end text; part_start timestamptz; part_end timestamptz; parent_schema text; parent_name text; part_schema text; part_name text; pos int4; fq_table text; fq_part text; q_grantee text; g record; r record; sql text; pgver integer; begin if i_table is null or i_part is null then raise exception 'need table and part'; end if; -- load postgres version (XYYZZ). show server_version_num into pgver; -- parent table schema and name + quoted name pos := position('.' in i_table); if pos > 0 then parent_schema := substring(i_table for pos - 1); parent_name := substring(i_table from pos + 1); else parent_schema := 'public'; parent_name := i_table; end if; fq_table := quote_ident(parent_schema) || '.' || quote_ident(parent_name); -- part table schema and name + quoted name pos := position('.' in i_part); if pos > 0 then part_schema := substring(i_part for pos - 1); part_name := substring(i_part from pos + 1); else part_schema := 'public'; part_name := i_part; end if; fq_part := quote_ident(part_schema) || '.' 
|| quote_ident(part_name); -- allow only single creation at a time, without affecting DML operations execute 'lock table ' || fq_table || ' in share update exclusive mode'; -- check if part table exists perform 1 from pg_class t, pg_namespace s where t.relnamespace = s.oid and s.nspname = part_schema and t.relname = part_name; if found then return 0; end if; -- need to use 'like' to get indexes sql := 'create table ' || fq_part || ' (like ' || fq_table; if pgver >= 90000 then sql := sql || ' including all'; else sql := sql || ' including indexes including constraints including defaults'; end if; sql := sql || ') inherits (' || fq_table || ')'; execute sql; -- extra check constraint if i_part_field != '' then part_start := date_trunc(i_part_period, i_part_time); part_end := part_start + ('1 ' || i_part_period)::interval; chk_start := quote_literal(to_char(part_start, 'YYYY-MM-DD HH24:MI:SS')); chk_end := quote_literal(to_char(part_end, 'YYYY-MM-DD HH24:MI:SS')); sql := 'alter table '|| fq_part || ' add check (' || quote_ident(i_part_field) || ' >= ' || chk_start || ' and ' || quote_ident(i_part_field) || ' < ' || chk_end || ')'; execute sql; end if; -- load grants from parent table for g in select grantor, grantee, privilege_type, is_grantable from information_schema.table_privileges where table_schema = parent_schema and table_name = parent_name loop if g.grantee = 'PUBLIC' then q_grantee = 'public'; else q_grantee := quote_ident(g.grantee); end if; sql := 'grant ' || g.privilege_type || ' on ' || fq_part || ' to ' || q_grantee; if g.is_grantable = 'YES' then sql := sql || ' with grant option'; end if; execute sql; end loop; return 1; end; $$ language plpgsql; -- drop old function with timestamp drop function if exists public.create_partition(text, text, text, text, timestamp, text); skytools-3.2.6/sql/dispatch/Makefile0000644000000000000000000000041012426435645014343 0ustar REGRESS = test_create_part REGRESS_OPTS = --load-language=plpgsql --load-language=plpythonu 
PG_CONFIG = pg_config PGXS = $(shell $(PG_CONFIG) --pgxs) include $(PGXS) test: make installcheck || { less regression.diffs ; exit 1; } ack: cp results/* expected/ skytools-3.2.6/sql/dispatch/sql/0000755000000000000000000000000012426435645013507 5ustar skytools-3.2.6/sql/dispatch/sql/test_create_part.sql0000644000000000000000000000246212426435645017564 0ustar \set ECHO none set log_error_verbosity = 'terse'; set client_min_messages = 'warning'; \i create_partition.sql \set ECHO all drop role if exists ptest1; drop role if exists ptest2; create group ptest1; create group ptest2; create table events ( id int4 primary key, txt text not null, ctime timestamptz not null default now(), someval int4 check (someval > 0) ); create index ctime_idx on events (ctime); create rule ignore_dups AS on insert to events where (exists (select 1 from events where (events.id = new.id))) do instead nothing; grant select,delete on events to ptest1; grant select,update,delete on events to ptest2 with grant option; select create_partition('events', 'events_2011_01', 'id', 'ctime', '2011-01-01', 'month'); select create_partition('events', 'events_2011_01', 'id', 'ctime', '2011-01-01'::timestamptz, 'month'); select create_partition('events', 'events_2011_01', 'id', 'ctime', '2011-01-01'::timestamp, 'month'); select count(*) from pg_indexes where schemaname='public' and tablename = 'events_2011_01'; select count(*) from pg_constraint where conrelid = 'public.events_2011_01'::regclass; select count(*) from pg_rules where schemaname = 'public' and tablename = 'events_2011_01'; -- \d events_2011_01 -- \dp events -- \dp events_2011_01 skytools-3.2.6/sql/dispatch/expected/0000755000000000000000000000000012426435645014511 5ustar skytools-3.2.6/sql/dispatch/expected/test_create_part.out0000644000000000000000000000273112426435645020575 0ustar \set ECHO none drop role if exists ptest1; drop role if exists ptest2; create group ptest1; create group ptest2; create table events ( id int4 primary key, txt 
text not null, ctime timestamptz not null default now(), someval int4 check (someval > 0) ); create index ctime_idx on events (ctime); create rule ignore_dups AS on insert to events where (exists (select 1 from events where (events.id = new.id))) do instead nothing; grant select,delete on events to ptest1; grant select,update,delete on events to ptest2 with grant option; select create_partition('events', 'events_2011_01', 'id', 'ctime', '2011-01-01', 'month'); create_partition ------------------ 1 (1 row) select create_partition('events', 'events_2011_01', 'id', 'ctime', '2011-01-01'::timestamptz, 'month'); create_partition ------------------ 0 (1 row) select create_partition('events', 'events_2011_01', 'id', 'ctime', '2011-01-01'::timestamp, 'month'); create_partition ------------------ 0 (1 row) select count(*) from pg_indexes where schemaname='public' and tablename = 'events_2011_01'; count ------- 2 (1 row) select count(*) from pg_constraint where conrelid = 'public.events_2011_01'::regclass; count ------- 3 (1 row) select count(*) from pg_rules where schemaname = 'public' and tablename = 'events_2011_01'; count ------- 0 (1 row) -- \d events_2011_01 -- \dp events -- \dp events_2011_01 skytools-3.2.6/sql/pgq_node/0000755000000000000000000000000012426435645012705 5ustar skytools-3.2.6/sql/pgq_node/Makefile0000644000000000000000000000067412426435645014354 0ustar EXTENSION = pgq_node EXT_VERSION = 3.2.5 EXT_OLD_VERSIONS = 3.1 3.1.3 3.1.6 3.2 Extension_regress = pgq_node_init_ext pgq_node_test Contrib_regress = pgq_node_init_noext pgq_node_test include ../common-pgxs.mk # # docs # dox: cleandox $(SRCS) mkdir -p docs/html mkdir -p docs/sql $(CATSQL) --ndoc structure/tables.sql > docs/sql/pgq_node.sql $(CATSQL) --ndoc structure/functions.sql > docs/sql/functions.sql $(NDOC) $(NDOCARGS) skytools-3.2.6/sql/pgq_node/functions/0000755000000000000000000000000012426435645014715 5ustar 
skytools-3.2.6/sql/pgq_node/functions/pgq_node.register_subscriber.sql0000644000000000000000000000413212426435645023300 0ustar create or replace function pgq_node.register_subscriber( in i_queue_name text, in i_remote_node_name text, in i_remote_worker_name text, in i_custom_tick_id int8, out ret_code int4, out ret_note text, out global_watermark bigint) returns record as $$ -- ---------------------------------------------------------------------- -- Function: pgq_node.register_subscriber(4) -- -- Subscribe remote node to local node at custom position. -- Should be used when changing provider for existing node. -- -- Parameters: -- i_node_name - set name -- i_remote_node_name - node name -- i_remote_worker_name - consumer name -- i_custom_tick_id - tick id [optional] -- -- Returns: -- ret_code - error code -- ret_note - description -- global_watermark - minimal watermark -- ---------------------------------------------------------------------- declare n record; node_wm_name text; node_pos bigint; begin select node_type into n from pgq_node.node_info where queue_name = i_queue_name for update; if not found then select 404, 'Unknown queue: ' || i_queue_name into ret_code, ret_note; return; end if; select last_tick into global_watermark from pgq.get_consumer_info(i_queue_name, '.global_watermark'); if n.node_type not in ('root', 'branch') then select 401, 'Cannot subscribe to ' || n.node_type || ' node' into ret_code, ret_note; return; end if; node_wm_name := '.' 
|| i_remote_node_name || '.watermark'; node_pos := coalesce(i_custom_tick_id, global_watermark); perform pgq.register_consumer_at(i_queue_name, node_wm_name, global_watermark); perform pgq.register_consumer_at(i_queue_name, i_remote_worker_name, node_pos); insert into pgq_node.subscriber_info (queue_name, subscriber_node, worker_name, watermark_name) values (i_queue_name, i_remote_node_name, i_remote_worker_name, node_wm_name); select 200, 'Subscriber registered: '||i_remote_node_name into ret_code, ret_note; return; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq_node/functions/pgq_node.maint_watermark.sql0000644000000000000000000000151512426435645022420 0ustar create or replace function pgq_node.maint_watermark(i_queue_name text) returns int4 as $$ -- ---------------------------------------------------------------------- -- Function: pgq_node.maint_watermark(1) -- -- Move global watermark on root node. -- -- Returns: -- 0 - tells pgqd to call just once -- ---------------------------------------------------------------------- declare _lag interval; begin perform 1 from pgq_node.node_info where queue_name = i_queue_name and node_type = 'root' for update; if not found then return 0; end if; select lag into _lag from pgq.get_consumer_info(i_queue_name, '.global_watermark'); if _lag >= '5 minutes'::interval then perform pgq_node.set_global_watermark(i_queue_name, NULL); end if; return 0; end; $$ language plpgsql; skytools-3.2.6/sql/pgq_node/functions/pgq_node.set_consumer_paused.sql0000644000000000000000000000312712426435645023303 0ustar create or replace function pgq_node.set_consumer_paused( in i_queue_name text, in i_consumer_name text, in i_paused boolean, out ret_code int4, out ret_note text) as $$ -- ---------------------------------------------------------------------- -- Function: pgq_node.set_consumer_paused(3) -- -- Set consumer paused flag. 
-- -- Parameters: -- i_queue_name - cascaded queue name -- i_consumer_name - cascaded consumer name -- i_paused - new flag state -- Returns: -- 200 - ok -- 201 - already paused -- 404 - consumer not found -- ---------------------------------------------------------------------- declare old_flag boolean; word text; begin if i_paused then word := 'paused'; else word := 'resumed'; end if; select paused into old_flag from pgq_node.local_state where queue_name = i_queue_name and consumer_name = i_consumer_name for update; if not found then select 404, 'Unknown consumer: ' || i_consumer_name into ret_code, ret_note; elsif old_flag = i_paused then select 201, 'Consumer ' || i_consumer_name || ' already ' || word into ret_code, ret_note; else update pgq_node.local_state set paused = i_paused, uptodate = false where queue_name = i_queue_name and consumer_name = i_consumer_name; select 200, 'Consumer '||i_consumer_name||' tagged as '||word into ret_code, ret_note; end if; return; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq_node/functions/pgq_node.drop_node.sql0000644000000000000000000000473412426435645021212 0ustar create or replace function pgq_node.drop_node( in i_queue_name text, in i_node_name text, out ret_code int4, out ret_note text) returns record as $$ -- ---------------------------------------------------------------------- -- Function: pgq_node.drop_node(2) -- -- Drop node. This needs to be run on all the members of a set -- to properly get rid of the node. 
-- -- Parameters: -- i_queue_name - queue name -- i_node_name - node_name -- -- Returns: -- ret_code - error code -- ret_note - error description -- -- Return Codes: -- 200 - Ok -- 304 - No such queue -- 406 - That is a provider -- Calls: -- None -- Tables directly manipulated: -- None ------------------------------------------------------------------------ declare _is_local boolean; _is_prov boolean; begin select (n.node_name = i_node_name), (select s.provider_node = i_node_name from pgq_node.local_state s where s.queue_name = i_queue_name and s.consumer_name = n.worker_name) into _is_local, _is_prov from pgq_node.node_info n where n.queue_name = i_queue_name; if not found then -- proceed with cleaning anyway, as there schenarios -- where some data is left around _is_prov := false; _is_local := true; end if; -- drop local state if _is_local then delete from pgq_node.subscriber_info where queue_name = i_queue_name; delete from pgq_node.local_state where queue_name = i_queue_name; delete from pgq_node.node_info where queue_name = i_queue_name and node_name = i_node_name; perform pgq.drop_queue(queue_name, true) from pgq.queue where queue_name = i_queue_name; delete from pgq_node.node_location where queue_name = i_queue_name and node_name <> i_node_name; elsif _is_prov then select 405, 'Cannot drop provider node: ' || i_node_name into ret_code, ret_note; return; else perform pgq_node.unregister_subscriber(i_queue_name, i_node_name); end if; -- let the unregister_location send event if needed select f.ret_code, f.ret_note from pgq_node.unregister_location(i_queue_name, i_node_name) f into ret_code, ret_note; select 200, 'Node dropped: ' || i_node_name into ret_code, ret_note; return; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq_node/functions/pgq_node.get_subscriber_info.sql0000644000000000000000000000256312426435645023254 0ustar create or replace function pgq_node.get_subscriber_info( in i_queue_name text, out node_name text, out worker_name 
text, out node_watermark int8) returns setof record as $$ -- ---------------------------------------------------------------------- -- Function: pgq_node.get_subscriber_info(1) -- -- Get subscriber list for the local node. -- -- It may be out-of-date, due to in-progress -- administrative change. -- Node's local provider info ( pgq_node.get_node_info() or pgq_node.get_worker_state(1) ) -- is the authoritative source. -- -- Parameters: -- i_queue_name - cascaded queue name -- -- Returns: -- node_name - node name that uses current node as provider -- worker_name - consumer that maintains remote node -- local_watermark - lowest tick_id on subscriber -- ---------------------------------------------------------------------- declare _watermark_name text; begin for node_name, worker_name, _watermark_name in select s.subscriber_node, s.worker_name, s.watermark_name from pgq_node.subscriber_info s where s.queue_name = i_queue_name order by 1 loop select last_tick into node_watermark from pgq.get_consumer_info(i_queue_name, _watermark_name); return next; end loop; return; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq_node/functions/pgq_node.is_root_node.sql0000644000000000000000000000130512426435645021713 0ustar create or replace function pgq_node.is_root_node(i_queue_name text) returns bool as $$ -- ---------------------------------------------------------------------- -- Function: pgq_node.is_root_node(1) -- -- Checs if node is root. 
-- -- Parameters: -- i_queue_name - queue name -- Returns: -- true - if this this the root node for queue -- ---------------------------------------------------------------------- declare res bool; begin select n.node_type = 'root' into res from pgq_node.node_info n where n.queue_name = i_queue_name; if not found then raise exception 'queue does not exist: %', i_queue_name; end if; return res; end; $$ language plpgsql; skytools-3.2.6/sql/pgq_node/functions/pgq_node.set_subscriber_watermark.sql0000644000000000000000000000315712426435645024332 0ustar create or replace function pgq_node.set_subscriber_watermark( in i_queue_name text, in i_node_name text, in i_watermark bigint, out ret_code int4, out ret_note text) returns record as $$ -- ---------------------------------------------------------------------- -- Function: pgq_node.set_subscriber_watermark(3) -- -- Notify provider about subscribers lowest watermark. -- -- Called on provider at interval by each worker -- -- Parameters: -- i_queue_name - cascaded queue name -- i_node_name - subscriber node name -- i_watermark - tick_id -- -- Returns: -- ret_code - error code -- ret_note - description -- ---------------------------------------------------------------------- declare n record; wm_name text; begin wm_name := '.' || i_node_name || '.watermark'; select * into n from pgq.get_consumer_info(i_queue_name, wm_name); if not found then select 404, 'node '||i_node_name||' not subscribed to queue ', i_queue_name into ret_code, ret_note; return; end if; -- todo: check if wm sane? 
if i_watermark < n.last_tick then select 405, 'watermark must not be moved backwards' into ret_code, ret_note; return; elsif i_watermark = n.last_tick then select 100, 'watermark already set' into ret_code, ret_note; return; end if; perform pgq.register_consumer_at(i_queue_name, wm_name, i_watermark); select 200, wm_name || ' set to ' || i_watermark::text into ret_code, ret_note; return; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq_node/functions/pgq_node.unregister_location.sql0000644000000000000000000000443612426435645023317 0ustar create or replace function pgq_node.unregister_location( in i_queue_name text, in i_node_name text, out ret_code int4, out ret_note text) returns record as $$ -- ---------------------------------------------------------------------- -- Function: pgq_node.unregister_location(2) -- -- Drop unreferenced node. -- -- Parameters: -- i_queue_name - queue name -- i_node_name - node to drop -- -- Returns: -- ret_code - error code -- ret_note - error description -- -- Return Codes: -- 200 - Ok -- 301 - Location not found -- 403 - Cannot drop nodes own or parent location -- ---------------------------------------------------------------------- declare _queue_name text; _wm_consumer text; _global_wm bigint; sub record; node record; begin select n.node_name, n.node_type, s.provider_node into node from pgq_node.node_info n left join pgq_node.local_state s on (s.consumer_name = n.worker_name and s.queue_name = n.queue_name) where n.queue_name = i_queue_name; if found then if node.node_name = i_node_name then select 403, 'Cannot drop nodes own location' into ret_code, ret_note; return; end if; if node.provider_node = i_node_name then select 403, 'Cannot drop location of nodes parent' into ret_code, ret_note; return; end if; end if; -- -- There may be obsolete subscriptions around -- drop them silently. 
-- perform pgq_node.unregister_subscriber(i_queue_name, i_node_name); -- -- Actual removal -- delete from pgq_node.node_location where queue_name = i_queue_name and node_name = i_node_name; if found then select 200, 'Ok' into ret_code, ret_note; else select 301, 'Location not found: ' || i_queue_name || '/' || i_node_name into ret_code, ret_note; end if; if node.node_type = 'root' then perform pgq.insert_event(i_queue_name, 'pgq.unregister-location', i_node_name, i_queue_name, null, null, null) from pgq_node.node_info n where n.queue_name = i_queue_name; end if; return; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq_node/functions/pgq_node.unregister_subscriber.sql0000644000000000000000000000257412426435645023653 0ustar create or replace function pgq_node.unregister_subscriber( in i_queue_name text, in i_remote_node_name text, out ret_code int4, out ret_note text) returns record as $$ -- ---------------------------------------------------------------------- -- Function: pgq_node.unregister_subscriber(2) -- -- Unsubscribe remote node from local node. -- -- Parameters: -- i_queue_name - set name -- i_remote_node_name - node name -- -- Returns: -- ret_code - error code -- ret_note - description -- ---------------------------------------------------------------------- declare n_wm_name text; worker_name text; begin n_wm_name := '.' 
|| i_remote_node_name || '.watermark'; select s.worker_name into worker_name from pgq_node.subscriber_info s where queue_name = i_queue_name and subscriber_node = i_remote_node_name; if not found then select 304, 'Subscriber not found' into ret_code, ret_note; return; end if; delete from pgq_node.subscriber_info where queue_name = i_queue_name and subscriber_node = i_remote_node_name; perform pgq.unregister_consumer(i_queue_name, n_wm_name); perform pgq.unregister_consumer(i_queue_name, worker_name); select 200, 'Subscriber unregistered: '||i_remote_node_name into ret_code, ret_note; return; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq_node/functions/pgq_node.set_consumer_uptodate.sql0000644000000000000000000000215412426435645023646 0ustar create or replace function pgq_node.set_consumer_uptodate( in i_queue_name text, in i_consumer_name text, in i_uptodate boolean, out ret_code int4, out ret_note text) returns record as $$ -- ---------------------------------------------------------------------- -- Function: pgq_node.set_consumer_uptodate(3) -- -- Set consumer uptodate flag..... 
-- -- Parameters: -- i_queue_name - queue name -- i_consumer_name - consumer name -- i_uptodate - new flag state -- -- Returns: -- 200 - ok -- 404 - consumer not known -- ---------------------------------------------------------------------- begin update pgq_node.local_state set uptodate = i_uptodate where queue_name = i_queue_name and consumer_name = i_consumer_name; if found then select 200, 'Consumer uptodate = ' || i_uptodate::int4::text into ret_code, ret_note; else select 404, 'Consumer not known: ' || i_queue_name || '/' || i_consumer_name into ret_code, ret_note; end if; return; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq_node/functions/pgq_node.get_worker_state.sql0000644000000000000000000000774712426435645022620 0ustar create or replace function pgq_node.get_worker_state( in i_queue_name text, out ret_code int4, out ret_note text, out node_type text, out node_name text, out completed_tick bigint, out provider_node text, out provider_location text, out paused boolean, out uptodate boolean, out cur_error text, out worker_name text, out global_watermark bigint, out local_watermark bigint, out local_queue_top bigint, out combined_queue text, out combined_type text ) returns record as $$ -- ---------------------------------------------------------------------- -- Function: pgq_node.get_worker_state(1) -- -- Get info for consumer that maintains local node. 
-- -- Parameters: -- i_queue_name - cascaded queue name -- -- Returns: -- node_type - local node type -- node_name - local node name -- completed_tick - last committed tick -- provider_node - provider node name -- provider_location - connect string to provider node -- paused - this node should not do any work -- uptodate - if consumer has loaded last changes -- cur_error - failure reason -- worker_name - consumer name that maintains this node -- global_watermark - queue's global watermark -- local_watermark - queue's local watermark, for this and below nodes -- local_queue_top - last tick in local queue -- combined_queue - queue name for target set -- combined_type - node type of target setA -- ---------------------------------------------------------------------- begin select n.node_type, n.node_name, n.worker_name, n.combined_queue into node_type, node_name, worker_name, combined_queue from pgq_node.node_info n where n.queue_name = i_queue_name; if not found then select 404, 'Unknown queue: ' || i_queue_name into ret_code, ret_note; return; end if; select s.last_tick_id, s.provider_node, s.paused, s.uptodate, s.cur_error into completed_tick, provider_node, paused, uptodate, cur_error from pgq_node.local_state s where s.queue_name = i_queue_name and s.consumer_name = worker_name; if not found then select 404, 'Unknown consumer: ' || i_queue_name || '/' || worker_name into ret_code, ret_note; return; end if; select 100, 'Ok', p.node_location into ret_code, ret_note, provider_location from pgq_node.node_location p where p.queue_name = i_queue_name and p.node_name = provider_node; if not found then select 404, 'Unknown provider node: ' || i_queue_name || '/' || provider_node into ret_code, ret_note; return; end if; if combined_queue is not null then select n.node_type into combined_type from pgq_node.node_info n where n.queue_name = get_worker_state.combined_queue; if not found then select 404, 'Combinde queue node not found: ' || combined_queue into ret_code, 
ret_note; return; end if; end if; if node_type in ('root', 'branch') then select min(case when consumer_name = '.global_watermark' then null else last_tick end), min(case when consumer_name = '.global_watermark' then last_tick else null end) into local_watermark, global_watermark from pgq.get_consumer_info(i_queue_name); if local_watermark is null then select t.tick_id into local_watermark from pgq.tick t, pgq.queue q where t.tick_queue = q.queue_id and q.queue_name = i_queue_name order by 1 desc limit 1; end if; select tick_id from pgq.tick t, pgq.queue q where q.queue_name = i_queue_name and t.tick_queue = q.queue_id order by t.tick_queue desc, t.tick_id desc limit 1 into local_queue_top; else local_watermark := completed_tick; end if; return; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq_node/functions/pgq_node.get_consumer_info.sql0000644000000000000000000000243112426435645022736 0ustar create or replace function pgq_node.get_consumer_info( in i_queue_name text, out consumer_name text, out provider_node text, out last_tick_id int8, out paused boolean, out uptodate boolean, out cur_error text) returns setof record as $$ -- ---------------------------------------------------------------------- -- Function: pgq_node.get_consumer_info(1) -- -- Get consumer list that work on the local node. 
-- -- Parameters: -- i_queue_name - cascaded queue name -- -- Returns: -- consumer_name - cascaded consumer name -- provider_node - node from where the consumer reads from -- last_tick_id - last committed tick -- paused - if consumer is paused -- uptodate - if consumer is uptodate -- cur_error - failure reason -- ---------------------------------------------------------------------- begin for consumer_name, provider_node, last_tick_id, paused, uptodate, cur_error in select s.consumer_name, s.provider_node, s.last_tick_id, s.paused, s.uptodate, s.cur_error from pgq_node.local_state s where s.queue_name = i_queue_name order by 1 loop return next; end loop; return; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq_node/functions/pgq_node.create_node.sql0000644000000000000000000001027212426435645021503 0ustar create or replace function pgq_node.create_node( in i_queue_name text, in i_node_type text, in i_node_name text, in i_worker_name text, in i_provider_name text, in i_global_watermark bigint, in i_combined_queue text, out ret_code int4, out ret_note text) returns record as $$ -- ---------------------------------------------------------------------- -- Function: pgq_node.create_node(7) -- -- Initialize node. -- -- Parameters: -- i_node_name - cascaded queue name -- i_node_type - node type -- i_node_name - node name -- i_worker_name - worker consumer name -- i_provider_name - provider node name for non-root nodes -- i_global_watermark - global lowest tick_id -- i_combined_queue - merge-leaf: target queue -- -- Returns: -- 200 - Ok -- 401 - node already initialized -- ???? - maybe we coud use more error codes ? 
-- -- Node Types: -- root - master node -- branch - subscriber node that can be provider to others -- leaf - subscriber node that cannot be provider to others -- Calls: -- None -- Tables directly manipulated: -- None -- ---------------------------------------------------------------------- declare _wm_consumer text; _global_wm bigint; begin perform 1 from pgq_node.node_info where queue_name = i_queue_name; if found then select 401, 'Node already initialized' into ret_code, ret_note; return; end if; _wm_consumer := '.global_watermark'; if i_node_type = 'root' then if coalesce(i_provider_name, i_global_watermark::text, i_combined_queue) is not null then select 401, 'unexpected args for '||i_node_type into ret_code, ret_note; return; end if; perform pgq.create_queue(i_queue_name); perform pgq.register_consumer(i_queue_name, _wm_consumer); _global_wm := (select last_tick from pgq.get_consumer_info(i_queue_name, _wm_consumer)); elsif i_node_type = 'branch' then if i_provider_name is null then select 401, 'provider not set for '||i_node_type into ret_code, ret_note; return; end if; if i_global_watermark is null then select 401, 'global watermark not set for '||i_node_type into ret_code, ret_note; return; end if; perform pgq.create_queue(i_queue_name); update pgq.queue set queue_external_ticker = true, queue_disable_insert = true where queue_name = i_queue_name; if i_global_watermark > 1 then perform pgq.ticker(i_queue_name, i_global_watermark, now(), 1); end if; perform pgq.register_consumer_at(i_queue_name, _wm_consumer, i_global_watermark); _global_wm := i_global_watermark; elsif i_node_type = 'leaf' then _global_wm := i_global_watermark; if i_combined_queue is not null then perform 1 from pgq.get_queue_info(i_combined_queue); if not found then select 401, 'non-existing queue on leaf side: '||i_combined_queue into ret_code, ret_note; return; end if; end if; else select 401, 'bad node type: '||i_node_type into ret_code, ret_note; end if; insert into pgq_node.node_info 
(queue_name, node_type, node_name, worker_name, combined_queue) values (i_queue_name, i_node_type, i_node_name, i_worker_name, i_combined_queue); if i_node_type <> 'root' then select f.ret_code, f.ret_note into ret_code, ret_note from pgq_node.register_consumer(i_queue_name, i_worker_name, i_provider_name, _global_wm) f; else select f.ret_code, f.ret_note into ret_code, ret_note from pgq_node.register_consumer(i_queue_name, i_worker_name, i_node_name, _global_wm) f; end if; if ret_code <> 200 then return; end if; select 200, 'Node "' || i_node_name || '" initialized for queue "' || i_queue_name || '" with type "' || i_node_type || '"' into ret_code, ret_note; return; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq_node/functions/pgq_node.demote_root.sql0000644000000000000000000000721212426435645021553 0ustar create or replace function pgq_node.demote_root( in i_queue_name text, in i_step int4, in i_new_provider text, out ret_code int4, out ret_note text, out last_tick int8) as $$ -- ---------------------------------------------------------------------- -- Function: pgq_node.demote_root(3) -- -- Multi-step root demotion to branch. -- -- Must be be called for each step in sequence: -- -- Step 1 - disable writing to queue. -- Step 2 - wait until writers go away, do tick. -- Step 3 - change type, register. 
-- -- Parameters: -- i_queue_name - queue name -- i_step - step number -- i_new_provider - new provider node -- Returns: -- 200 - success -- 404 - node not initialized for queue -- 301 - node is not root -- ---------------------------------------------------------------------- declare n_type text; w_name text; sql text; ev_id int8; ev_tbl text; begin select node_type, worker_name into n_type, w_name from pgq_node.node_info where queue_name = i_queue_name for update; if not found then select 404, 'Node not initialized for queue: ' || i_queue_name into ret_code, ret_note; return; end if; if n_type != 'root' then select 301, 'Node not root' into ret_code, ret_note; return; end if; if i_step > 1 then select queue_data_pfx into ev_tbl from pgq.queue where queue_name = i_queue_name and queue_disable_insert and queue_external_ticker; if not found then raise exception 'steps in wrong order'; end if; end if; if i_step = 1 then update pgq.queue set queue_disable_insert = true, queue_external_ticker = true where queue_name = i_queue_name; if not found then select 404, 'Huh, no queue?: ' || i_queue_name into ret_code, ret_note; return; end if; select 200, 'Step 1: Writing disabled for: ' || i_queue_name into ret_code, ret_note; elsif i_step = 2 then set local session_replication_role = 'replica'; -- lock parent table to stop updates, allow reading sql := 'lock table ' || ev_tbl || ' in exclusive mode'; execute sql; select nextval(queue_tick_seq), nextval(queue_event_seq) into last_tick, ev_id from pgq.queue where queue_name = i_queue_name; perform pgq.ticker(i_queue_name, last_tick, now(), ev_id); select 200, 'Step 2: Inserted last tick: ' || i_queue_name into ret_code, ret_note; elsif i_step = 3 then -- change type, point worker to new provider select t.tick_id into last_tick from pgq.tick t, pgq.queue q where q.queue_name = i_queue_name and t.tick_queue = q.queue_id order by t.tick_queue desc, t.tick_id desc limit 1; update pgq_node.node_info set node_type = 'branch' where 
queue_name = i_queue_name; update pgq_node.local_state set provider_node = i_new_provider, last_tick_id = last_tick, uptodate = false where queue_name = i_queue_name and consumer_name = w_name; select 200, 'Step 3: Demoted root to branch: ' || i_queue_name into ret_code, ret_note; else raise exception 'incorrect step number'; end if; return; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq_node/functions/pgq_node.get_node_info.sql0000644000000000000000000000700212426435645022027 0ustar drop function if exists pgq_node.get_node_info(text); create or replace function pgq_node.get_node_info( in i_queue_name text, out ret_code int4, out ret_note text, out node_type text, out node_name text, out global_watermark bigint, out local_watermark bigint, out provider_node text, out provider_location text, out combined_queue text, out combined_type text, out worker_name text, out worker_paused bool, out worker_uptodate bool, out worker_last_tick bigint, out node_attrs text ) returns record as $$ -- ---------------------------------------------------------------------- -- Function: pgq_node.get_node_info(1) -- -- Get local node info for cascaded queue. -- -- Parameters: -- i_queue_name - cascaded queue name -- -- Returns: -- node_type - local node type -- node_name - local node name -- global_watermark - queue's global watermark -- local_watermark - queue's local watermark, for this and below nodes -- provider_node - provider node name -- provider_location - provider connect string -- combined_queue - queue name for target set -- combined_type - node type of target set -- worker_name - consumer name that maintains this node -- worker_paused - is worker paused -- worker_uptodate - is worker seen the changes -- worker_last_tick - last committed tick_id by worker -- node_attrs - urlencoded dict of random attrs for worker (eg. 
sync_watermark) -- ---------------------------------------------------------------------- declare sql text; begin select 100, 'Ok', n.node_type, n.node_name, c.node_type, c.queue_name, w.provider_node, l.node_location, n.worker_name, w.paused, w.uptodate, w.last_tick_id, n.node_attrs into ret_code, ret_note, node_type, node_name, combined_type, combined_queue, provider_node, provider_location, worker_name, worker_paused, worker_uptodate, worker_last_tick, node_attrs from pgq_node.node_info n left join pgq_node.node_info c on (c.queue_name = n.combined_queue) left join pgq_node.local_state w on (w.queue_name = n.queue_name and w.consumer_name = n.worker_name) left join pgq_node.node_location l on (l.queue_name = w.queue_name and l.node_name = w.provider_node) where n.queue_name = i_queue_name; if not found then select 404, 'Unknown queue: ' || i_queue_name into ret_code, ret_note; return; end if; if node_type in ('root', 'branch') then select min(case when consumer_name = '.global_watermark' then null else last_tick end), min(case when consumer_name = '.global_watermark' then last_tick else null end) into local_watermark, global_watermark from pgq.get_consumer_info(i_queue_name); if local_watermark is null then select t.tick_id into local_watermark from pgq.tick t, pgq.queue q where t.tick_queue = q.queue_id and q.queue_name = i_queue_name order by 1 desc limit 1; end if; else local_watermark := worker_last_tick; end if; if node_type = 'root' then select tick_id from pgq.tick t, pgq.queue q where q.queue_name = i_queue_name and t.tick_queue = q.queue_id order by t.tick_queue desc, t.tick_id desc limit 1 into worker_last_tick; end if; return; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq_node/functions/pgq_node.get_queue_locations.sql0000644000000000000000000000165212426435645023273 0ustar create or replace function pgq_node.get_queue_locations( in i_queue_name text, out node_name text, out node_location text, out dead boolean ) returns setof 
record as $$ -- ---------------------------------------------------------------------- -- Function: pgq_node.get_queue_locations(1) -- -- Get node list for the queue. -- -- Parameters: -- i_queue_name - queue name -- -- Returns: -- node_name - node name -- node_location - libpq connect string for the node -- dead - whether the node should be considered dead -- ---------------------------------------------------------------------- begin for node_name, node_location, dead in select l.node_name, l.node_location, l.dead from pgq_node.node_location l where l.queue_name = i_queue_name loop return next; end loop; return; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq_node/functions/pgq_node.promote_branch.sql0000644000000000000000000000412512426435645022235 0ustar create or replace function pgq_node.promote_branch( in i_queue_name text, out ret_code int4, out ret_note text) as $$ -- ---------------------------------------------------------------------- -- Function: pgq_node.promote_branch(1) -- -- Promote branch node to root. 
-- -- Parameters: -- i_queue_name - queue name -- -- Returns: -- 200 - success -- 404 - node not initialized for queue -- 301 - node is not branch -- ---------------------------------------------------------------------- declare n_name text; n_type text; w_name text; last_tick bigint; sql text; begin select node_name, node_type, worker_name into n_name, n_type, w_name from pgq_node.node_info where queue_name = i_queue_name for update; if not found then select 404, 'Node not initialized for queue: ' || i_queue_name into ret_code, ret_note; return; end if; if n_type != 'branch' then select 301, 'Node not branch' into ret_code, ret_note; return; end if; update pgq.queue set queue_disable_insert = false, queue_external_ticker = false where queue_name = i_queue_name; -- change type, point worker to itself select t.tick_id into last_tick from pgq.tick t, pgq.queue q where q.queue_name = i_queue_name and t.tick_queue = q.queue_id order by t.tick_queue desc, t.tick_id desc limit 1; -- make tick seq larger than last tick perform pgq.seq_setval(queue_tick_seq, last_tick) from pgq.queue where queue_name = i_queue_name; update pgq_node.node_info set node_type = 'root' where queue_name = i_queue_name; update pgq_node.local_state set provider_node = n_name, last_tick_id = last_tick, uptodate = false where queue_name = i_queue_name and consumer_name = w_name; select 200, 'Branch node promoted to root' into ret_code, ret_note; return; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq_node/functions/pgq_node.unregister_consumer.sql0000644000000000000000000000223212426435645023332 0ustar create or replace function pgq_node.unregister_consumer( in i_queue_name text, in i_consumer_name text, out ret_code int4, out ret_note text) returns record as $$ -- ---------------------------------------------------------------------- -- Function: pgq_node.unregister_consumer(2) -- -- Unregister cascaded consumer from local node. 
-- -- Parameters: -- i_queue_name - cascaded queue name -- i_consumer_name - cascaded consumer name -- -- Returns: -- ret_code - error code -- 200 - ok -- 404 - no such queue -- ret_note - description -- ---------------------------------------------------------------------- begin perform 1 from pgq_node.node_info where queue_name = i_queue_name for update; if not found then select 404, 'Unknown queue: ' || i_queue_name into ret_code, ret_note; return; end if; delete from pgq_node.local_state where queue_name = i_queue_name and consumer_name = i_consumer_name; select 200, 'Consumer '||i_consumer_name||' unregistered from '||i_queue_name into ret_code, ret_note; return; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq_node/functions/pgq_node.register_location.sql0000644000000000000000000000354712426435645022756 0ustar create or replace function pgq_node.register_location( in i_queue_name text, in i_node_name text, in i_node_location text, in i_dead boolean, out ret_code int4, out ret_note text) returns record as $$ -- ---------------------------------------------------------------------- -- Function: pgq_node.register_location(4) -- -- Add new node location. 
-- -- Parameters: -- i_queue_name - queue name -- i_node_name - node name -- i_node_location - node connect string -- i_dead - dead flag for node -- -- Returns: -- ret_code - error code -- ret_note - error description -- -- Return Codes: -- 200 - Ok -- ---------------------------------------------------------------------- declare node record; begin select node_type = 'root' as is_root into node from pgq_node.node_info where queue_name = i_queue_name for update; -- may return 0 rows perform 1 from pgq_node.node_location where queue_name = i_queue_name and node_name = i_node_name; if found then update pgq_node.node_location set node_location = coalesce(i_node_location, node_location), dead = i_dead where queue_name = i_queue_name and node_name = i_node_name; elsif i_node_location is not null then insert into pgq_node.node_location (queue_name, node_name, node_location, dead) values (i_queue_name, i_node_name, i_node_location, i_dead); end if; if node.is_root then perform pgq.insert_event(i_queue_name, 'pgq.location-info', i_node_name, i_queue_name, i_node_location, i_dead::text, null) from pgq_node.node_info n where n.queue_name = i_queue_name; end if; select 200, 'Location registered' into ret_code, ret_note; return; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq_node/functions/pgq_node.set_global_watermark.sql0000644000000000000000000000605512426435645023427 0ustar create or replace function pgq_node.set_global_watermark( in i_queue_name text, in i_watermark bigint, out ret_code int4, out ret_note text) returns record as $$ -- ---------------------------------------------------------------------- -- Function: pgq_node.set_global_watermark(2) -- -- Move global watermark on branch/leaf, publish on root. -- -- Parameters: -- i_queue_name - queue name -- i_watermark - global tick_id that is processed everywhere. -- NULL on root, then local wm is published. 
-- ---------------------------------------------------------------------- declare this record; _wm bigint; wm_consumer text; begin wm_consumer = '.global_watermark'; select node_type, queue_name, worker_name into this from pgq_node.node_info where queue_name = i_queue_name for update; if not found then select 404, 'Queue' || i_queue_name || ' not found' into ret_code, ret_note; return; end if; _wm = i_watermark; if this.node_type = 'root' then if i_watermark is null then select f.ret_code, f.ret_note, f.local_watermark into ret_code, ret_note, _wm from pgq_node.get_node_info(i_queue_name) f; if ret_code >= 300 then return; end if; if _wm is null then raise exception 'local_watermark=NULL from get_node_info()?'; end if; end if; -- move watermark perform pgq.register_consumer_at(i_queue_name, wm_consumer, _wm); -- send event downstream perform pgq.insert_event(i_queue_name, 'pgq.global-watermark', _wm::text, i_queue_name, null, null, null); -- update root workers pos to avoid it getting stale update pgq_node.local_state set last_tick_id = _wm where queue_name = i_queue_name and consumer_name = this.worker_name; elsif this.node_type = 'branch' then if i_watermark is null then select 500, 'bad usage: wm=null on branch node' into ret_code, ret_note; return; end if; -- tick can be missing if we are processing -- old batches that set watermark outside -- current range perform 1 from pgq.tick t, pgq.queue q where q.queue_name = i_queue_name and t.tick_queue = q.queue_id and t.tick_id = _wm; if not found then select 200, 'Skipping global watermark update to ' || _wm::text into ret_code, ret_note; return; end if; -- move watermark perform pgq.register_consumer_at(i_queue_name, wm_consumer, _wm); else select 100, 'Ignoring global watermark in leaf' into ret_code, ret_note; return; end if; select 200, 'Global watermark set to ' || _wm::text into ret_code, ret_note; return; end; $$ language plpgsql security definer; 
skytools-3.2.6/sql/pgq_node/functions/pgq_node.get_consumer_state.sql0000644000000000000000000000435612426435645023133 0ustar create or replace function pgq_node.get_consumer_state( in i_queue_name text, in i_consumer_name text, out ret_code int4, out ret_note text, out node_type text, out node_name text, out completed_tick bigint, out provider_node text, out provider_location text, out paused boolean, out uptodate boolean, out cur_error text ) returns record as $$ -- ---------------------------------------------------------------------- -- Function: pgq_node.get_consumer_state(2) -- -- Get info for cascaded consumer that targets local node. -- -- Parameters: -- i_node_name - cascaded queue name -- i_consumer_name - cascaded consumer name -- -- Returns: -- node_type - local node type -- node_name - local node name -- completed_tick - last committed tick -- provider_node - provider node name -- provider_location - connect string to provider node -- paused - this node should not do any work -- uptodate - if consumer has loaded last changes -- cur_error - failure reason -- ---------------------------------------------------------------------- begin select n.node_type, n.node_name into node_type, node_name from pgq_node.node_info n where n.queue_name = i_queue_name; if not found then select 404, 'Unknown queue: ' || i_queue_name into ret_code, ret_note; return; end if; select s.last_tick_id, s.provider_node, s.paused, s.uptodate, s.cur_error into completed_tick, provider_node, paused, uptodate, cur_error from pgq_node.local_state s where s.queue_name = i_queue_name and s.consumer_name = i_consumer_name; if not found then select 404, 'Unknown consumer: ' || i_queue_name || '/' || i_consumer_name into ret_code, ret_note; return; end if; select 100, 'Ok', p.node_location into ret_code, ret_note, provider_location from pgq_node.node_location p where p.queue_name = i_queue_name and p.node_name = provider_node; if not found then select 404, 'Unknown provider node: ' || 
i_queue_name || '/' || provider_node into ret_code, ret_note; return; end if; return; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq_node/functions/pgq_node.set_consumer_completed.sql0000644000000000000000000000233412426435645023775 0ustar create or replace function pgq_node.set_consumer_completed( in i_queue_name text, in i_consumer_name text, in i_tick_id int8, out ret_code int4, out ret_note text) as $$ -- ---------------------------------------------------------------------- -- Function: pgq_node.set_consumer_completed(3) -- -- Set last completed tick id for the cascaded consumer -- that it has committed to local node. -- -- Parameters: -- i_queue_name - cascaded queue name -- i_consumer_name - cascaded consumer name -- i_tick_id - tick id -- Returns: -- 200 - ok -- 404 - consumer not known -- ---------------------------------------------------------------------- begin update pgq_node.local_state set last_tick_id = i_tick_id, cur_error = NULL where queue_name = i_queue_name and consumer_name = i_consumer_name; if found then select 100, 'Consumer ' || i_consumer_name || ' compleded tick = ' || i_tick_id::text into ret_code, ret_note; else select 404, 'Consumer not known: ' || i_queue_name || '/' || i_consumer_name into ret_code, ret_note; end if; return; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq_node/functions/pgq_node.set_consumer_error.sql0000644000000000000000000000201412426435645023145 0ustar create or replace function pgq_node.set_consumer_error( in i_queue_name text, in i_consumer_name text, in i_error_msg text, out ret_code int4, out ret_note text) as $$ -- ---------------------------------------------------------------------- -- Function: pgq_node.set_consumer_error(3) -- -- If batch processing fails, consumer can store it's last error in db. 
-- Returns: -- 100 - ok -- 101 - consumer not known -- ---------------------------------------------------------------------- begin update pgq_node.local_state set cur_error = i_error_msg where queue_name = i_queue_name and consumer_name = i_consumer_name; if found then select 100, 'Consumer ' || i_consumer_name || ' error = ' || i_error_msg into ret_code, ret_note; else select 101, 'Consumer not known, ignoring: ' || i_queue_name || '/' || i_consumer_name into ret_code, ret_note; end if; return; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq_node/functions/pgq_node.is_leaf_node.sql0000644000000000000000000000130512426435645021637 0ustar create or replace function pgq_node.is_leaf_node(i_queue_name text) returns bool as $$ -- ---------------------------------------------------------------------- -- Function: pgq_node.is_leaf_node(1) -- -- Checs if node is leaf. -- -- Parameters: -- i_queue_name - queue name -- Returns: -- true - if this this the leaf node for queue -- ---------------------------------------------------------------------- declare res bool; begin select n.node_type = 'leaf' into res from pgq_node.node_info n where n.queue_name = i_queue_name; if not found then raise exception 'queue does not exist: %', i_queue_name; end if; return res; end; $$ language plpgsql; skytools-3.2.6/sql/pgq_node/functions/pgq_node.set_node_attrs.sql0000644000000000000000000000162412426435645022251 0ustar create or replace function pgq_node.set_node_attrs( in i_queue_name text, in i_node_attrs text, out ret_code int4, out ret_note text) returns record as $$ -- ---------------------------------------------------------------------- -- Function: pgq_node.create_attrs(2) -- -- Set node attributes. 
-- -- Parameters: -- i_node_name - cascaded queue name -- i_node_attrs - urlencoded node attrs -- -- Returns: -- 200 - ok -- 404 - node not found -- ---------------------------------------------------------------------- begin update pgq_node.node_info set node_attrs = i_node_attrs where queue_name = i_queue_name; if not found then select 404, 'Node not found' into ret_code, ret_note; return; end if; select 200, 'Node attributes updated' into ret_code, ret_note; return; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq_node/functions/pgq_node.set_partition_watermark.sql0000644000000000000000000000325712426435645024201 0ustar create or replace function pgq_node.set_partition_watermark( in i_combined_queue_name text, in i_part_queue_name text, in i_watermark bigint, out ret_code int4, out ret_note text) returns record as $$ -- ---------------------------------------------------------------------- -- Function: pgq_node.set_partition_watermark(3) -- -- Move merge-leaf position on combined-branch. 
-- -- Parameters: -- i_combined_queue_name - local combined queue name -- i_part_queue_name - local part queue name (merge-leaf) -- i_watermark - partition tick_id that came inside combined-root batch -- -- Returns: -- 200 - success -- 201 - no partition queue -- 401 - worker registration not found -- ---------------------------------------------------------------------- declare n record; begin -- check if combined-branch exists select c.node_type, p.worker_name into n from pgq_node.node_info c, pgq_node.node_info p where p.queue_name = i_part_queue_name and c.queue_name = i_combined_queue_name and p.combined_queue = c.queue_name and p.node_type = 'leaf' and c.node_type = 'branch'; if not found then select 201, 'Part-queue does not exist' into ret_code, ret_note; return; end if; update pgq_node.local_state set last_tick_id = i_watermark where queue_name = i_part_queue_name and consumer_name = n.worker_name; if not found then select 401, 'Worker registration not found' into ret_code, ret_note; return; end if; select 200, 'Ok' into ret_code, ret_note; return; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq_node/functions/pgq_node.change_consumer_provider.sql0000644000000000000000000000273612426435645024313 0ustar create or replace function pgq_node.change_consumer_provider( in i_queue_name text, in i_consumer_name text, in i_new_provider text, out ret_code int4, out ret_note text) as $$ -- ---------------------------------------------------------------------- -- Function: pgq_node.change_consumer_provider(3) -- -- Change provider for this consumer. 
-- -- Parameters: -- i_queue_name - queue name -- i_consumer_name - consumer name -- i_new_provider - node name for new provider -- Returns: -- ret_code - error code -- 200 - ok -- 404 - no such consumer or new node -- ret_note - description -- ---------------------------------------------------------------------- begin perform 1 from pgq_node.node_location where queue_name = i_queue_name and node_name = i_new_provider; if not found then select 404, 'New node not found: ' || i_new_provider into ret_code, ret_note; return; end if; update pgq_node.local_state set provider_node = i_new_provider, uptodate = false where queue_name = i_queue_name and consumer_name = i_consumer_name; if not found then select 404, 'Unknown consumer: ' || i_queue_name || '/' || i_consumer_name into ret_code, ret_note; return; end if; select 200, 'Consumer provider node set to : ' || i_new_provider into ret_code, ret_note; return; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq_node/functions/pgq_node.upgrade_schema.sql0000644000000000000000000000076712426435645022212 0ustar create or replace function pgq_node.upgrade_schema() returns int4 as $$ -- updates table structure if necessary declare cnt int4 = 0; begin -- node_info.node_attrs perform 1 from information_schema.columns where table_schema = 'pgq_node' and table_name = 'node_info' and column_name = 'node_attrs'; if not found then alter table pgq_node.node_info add column node_attrs text; cnt := cnt + 1; end if; return cnt; end; $$ language plpgsql; skytools-3.2.6/sql/pgq_node/functions/pgq_node.register_consumer.sql0000644000000000000000000000405712426435645022776 0ustar create or replace function pgq_node.register_consumer( in i_queue_name text, in i_consumer_name text, in i_provider_node text, in i_custom_tick_id int8, out ret_code int4, out ret_note text) returns record as $$ -- ---------------------------------------------------------------------- -- Function: pgq_node.register_consumer(4) -- -- Subscribe plain 
cascaded consumer to a target node. -- That means it's planning to read from remote node -- and write to local node. -- -- Parameters: -- i_queue_name - set name -- i_consumer_name - cascaded consumer name -- i_provider_node - node name -- i_custom_tick_id - tick id -- -- Returns: -- ret_code - error code -- 200 - ok -- 201 - already registered -- 401 - no such queue -- ret_note - description -- ---------------------------------------------------------------------- declare n record; node_wm_name text; node_pos bigint; begin select node_type into n from pgq_node.node_info where queue_name = i_queue_name for update; if not found then select 404, 'Unknown queue: ' || i_queue_name into ret_code, ret_note; return; end if; perform 1 from pgq_node.local_state where queue_name = i_queue_name and consumer_name = i_consumer_name; if found then update pgq_node.local_state set provider_node = i_provider_node, last_tick_id = i_custom_tick_id where queue_name = i_queue_name and consumer_name = i_consumer_name; select 201, 'Consumer already registered: ' || i_queue_name || '/' || i_consumer_name into ret_code, ret_note; return; end if; insert into pgq_node.local_state (queue_name, consumer_name, provider_node, last_tick_id) values (i_queue_name, i_consumer_name, i_provider_node, i_custom_tick_id); select 200, 'Consumer '||i_consumer_name||' registered on queue '||i_queue_name into ret_code, ret_note; return; end; $$ language plpgsql security definer; skytools-3.2.6/sql/pgq_node/functions/pgq_node.version.sql0000644000000000000000000000066512426435645020725 0ustar create or replace function pgq_node.version() returns text as $$ -- ---------------------------------------------------------------------- -- Function: pgq_node.version(0) -- -- Returns version string for pgq_node. ATM it is based on SkyTools -- version and only bumped when database code changes. 
-- ---------------------------------------------------------------------- begin return '3.2.5'; end; $$ language plpgsql; skytools-3.2.6/sql/pgq_node/sql/0000755000000000000000000000000012426435645013504 5ustar skytools-3.2.6/sql/pgq_node/sql/pgq_node_init_ext.sql0000644000000000000000000000057512426435645017733 0ustar create extension pgq; \set ECHO none \i structure/install.sql \set ECHO all create extension pgq_node from unpackaged; select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'pgq_node'; drop extension pgq_node; create extension pgq_node; select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'pgq_node'; skytools-3.2.6/sql/pgq_node/sql/pgq_node_init_noext.sql0000644000000000000000000000012612426435645020260 0ustar \set ECHO none \i ../pgq/pgq.sql \i structure/tables.sql \i structure/functions.sql skytools-3.2.6/sql/pgq_node/sql/pgq_node_test.sql0000644000000000000000000001305712426435645017066 0ustar select * from pgq_node.register_location('aqueue', 'node1', 'dbname=node1', false); select * from pgq_node.register_location('aqueue', 'node2', 'dbname=node2', false); select * from pgq_node.register_location('aqueue', 'node3', 'dbname=node3', false); select * from pgq_node.register_location('aqueue', 'node4', 'dbname=node44', false); select * from pgq_node.register_location('aqueue', 'node4', 'dbname=node4', false); select * from pgq_node.register_location('aqueue', 'node5', 'dbname=node4', false); select * from pgq_node.get_queue_locations('aqueue'); select * from pgq_node.unregister_location('aqueue', 'node5'); select * from pgq_node.unregister_location('aqueue', 'node5'); select * from pgq_node.get_queue_locations('aqueue'); select * from pgq_node.create_node('aqueue', 'root', 'node1', 'node1_worker', null, null, null); select * from pgq_node.register_subscriber('aqueue', 'node2', 'node2_worker', null); select * from pgq_node.register_subscriber('aqueue', 'node3', 'node3_worker', 
null); select * from pgq_node.maint_watermark('aqueue'); select * from pgq_node.maint_watermark('aqueue-x'); select * from pgq_node.get_consumer_info('aqueue'); select * from pgq_node.unregister_subscriber('aqueue', 'node3'); select queue_name, consumer_name, last_tick from pgq.get_consumer_info(); select * from pgq_node.get_worker_state('aqueue'); update pgq.queue set queue_ticker_max_lag = '0', queue_ticker_idle_period = '0'; select * from pgq.ticker('aqueue'); select * from pgq.ticker('aqueue'); select * from pgq_node.set_subscriber_watermark('aqueue', 'node2', 3); select queue_name, consumer_name, last_tick from pgq.get_consumer_info(); select * from pgq_node.set_node_attrs('aqueue', 'test=1'); select * from pgq_node.get_node_info('aqueue'); select * from pgq_node.get_subscriber_info('aqueue'); -- branch node select * from pgq_node.register_location('bqueue', 'node1', 'dbname=node1', false); select * from pgq_node.register_location('bqueue', 'node2', 'dbname=node2', false); select * from pgq_node.register_location('bqueue', 'node3', 'dbname=node3', false); select * from pgq_node.create_node('bqueue', 'branch', 'node2', 'node2_worker', 'node1', 1, null); select * from pgq_node.register_consumer('bqueue', 'random_consumer', 'node1', 1); select * from pgq_node.register_consumer('bqueue', 'random_consumer2', 'node1', 1); select * from pgq_node.local_state; select * from pgq_node.node_info; select * from pgq_node.get_node_info('aqueue'); select * from pgq_node.get_node_info('bqueue'); select * from pgq_node.get_node_info('cqueue'); select * from pgq_node.get_worker_state('aqueue'); select * from pgq_node.get_worker_state('bqueue'); select * from pgq_node.get_worker_state('cqueue'); select * from pgq_node.is_root_node('aqueue'); select * from pgq_node.is_root_node('bqueue'); select * from pgq_node.is_root_node('cqueue'); select * from pgq_node.get_consumer_state('bqueue', 'random_consumer'); select * from pgq_node.get_consumer_state('bqueue', 'random_consumer2'); 
select * from pgq_node.set_consumer_error('bqueue', 'random_consumer2', 'failure'); select * from pgq_node.get_consumer_state('bqueue', 'random_consumer2'); select * from pgq_node.set_consumer_completed('bqueue', 'random_consumer2', 2); select * from pgq_node.get_consumer_state('bqueue', 'random_consumer2'); select * from pgq_node.set_consumer_paused('bqueue', 'random_consumer2', true); select * from pgq_node.set_consumer_uptodate('bqueue', 'random_consumer2', true); select * from pgq_node.change_consumer_provider('bqueue', 'random_consumer2', 'node3'); select * from pgq_node.get_consumer_state('bqueue', 'random_consumer2'); select * from pgq_node.unregister_consumer('bqueue', 'random_consumer2'); select * from pgq_node.get_consumer_state('bqueue', 'random_consumer2'); select * from pgq_node.get_node_info('bqueue'); set session_replication_role = 'replica'; select * from pgq_node.demote_root('aqueue', 1, 'node3'); select * from pgq_node.demote_root('aqueue', 1, 'node3'); select * from pgq_node.demote_root('aqueue', 2, 'node3'); select * from pgq_node.demote_root('aqueue', 2, 'node3'); select * from pgq_node.demote_root('aqueue', 3, 'node3'); select * from pgq_node.demote_root('aqueue', 3, 'node3'); -- leaf node select * from pgq_node.register_location('mqueue', 'node1', 'dbname=node1', false); select * from pgq_node.register_location('mqueue', 'node2', 'dbname=node2', false); select * from pgq_node.register_location('mqueue', 'node3', 'dbname=node3', false); select * from pgq_node.create_node('mqueue', 'leaf', 'node2', 'node2_worker', 'node1', 13, 'aqueue'); select * from pgq_node.get_worker_state('mqueue'); select * from pgq_node.drop_node('asd', 'asd'); select * from pgq_node.drop_node('mqueue', 'node3'); select * from pgq_node.drop_node('mqueue', 'node2'); select * from pgq_node.drop_node('mqueue', 'node1'); select * from pgq_node.drop_node('aqueue', 'node5'); select * from pgq_node.drop_node('aqueue', 'node4'); select * from pgq_node.drop_node('aqueue', 
'node1'); select * from pgq_node.drop_node('aqueue', 'node2'); select * from pgq_node.drop_node('aqueue', 'node3'); \q select * from pgq_node.subscribe_node('aqueue', 'node2'); select * from pgq_node.subscribe_node('aqueue', 'node3', 1); select * from pgq_node.unsubscribe_node('aqueue', 'node3'); select * from pgq_node.get_node_info('aqueue'); select * from pgq_node.is_root('q'); select * from pgq_node.is_root('aqueue'); select * from pgq_node.is_root(null); select * from pgq_node.rename_node_step1('aqueue', 'node2', 'node2x'); select * from pgq_node.rename_node_step2('aqueue', 'node2', 'node2x'); select * from pgq_node.get_subscriber_info('aqueue'); skytools-3.2.6/sql/pgq_node/expected/0000755000000000000000000000000012426435645014506 5ustar skytools-3.2.6/sql/pgq_node/expected/pgq_node_init_ext.out0000644000000000000000000000073712426435645020745 0ustar create extension pgq; \set ECHO none upgrade_schema ---------------- 0 (1 row) create extension pgq_node from unpackaged; select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'pgq_node'; dumpable ---------- 4 (1 row) drop extension pgq_node; create extension pgq_node; select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'pgq_node'; dumpable ---------- 4 (1 row) skytools-3.2.6/sql/pgq_node/expected/pgq_node_init_noext.out0000644000000000000000000000020512426435645021270 0ustar \set ECHO none upgrade_schema ---------------- 0 (1 row) upgrade_schema ---------------- 0 (1 row) skytools-3.2.6/sql/pgq_node/expected/pgq_node_test.out0000644000000000000000000005572412426435645020107 0ustar select * from pgq_node.register_location('aqueue', 'node1', 'dbname=node1', false); ret_code | ret_note ----------+--------------------- 200 | Location registered (1 row) select * from pgq_node.register_location('aqueue', 'node2', 'dbname=node2', false); ret_code | ret_note ----------+--------------------- 200 | Location registered (1 row) select * from 
pgq_node.register_location('aqueue', 'node3', 'dbname=node3', false); ret_code | ret_note ----------+--------------------- 200 | Location registered (1 row) select * from pgq_node.register_location('aqueue', 'node4', 'dbname=node44', false); ret_code | ret_note ----------+--------------------- 200 | Location registered (1 row) select * from pgq_node.register_location('aqueue', 'node4', 'dbname=node4', false); ret_code | ret_note ----------+--------------------- 200 | Location registered (1 row) select * from pgq_node.register_location('aqueue', 'node5', 'dbname=node4', false); ret_code | ret_note ----------+--------------------- 200 | Location registered (1 row) select * from pgq_node.get_queue_locations('aqueue'); node_name | node_location | dead -----------+---------------+------ node1 | dbname=node1 | f node2 | dbname=node2 | f node3 | dbname=node3 | f node4 | dbname=node4 | f node5 | dbname=node4 | f (5 rows) select * from pgq_node.unregister_location('aqueue', 'node5'); ret_code | ret_note ----------+---------- 200 | Ok (1 row) select * from pgq_node.unregister_location('aqueue', 'node5'); ret_code | ret_note ----------+---------------------------------- 301 | Location not found: aqueue/node5 (1 row) select * from pgq_node.get_queue_locations('aqueue'); node_name | node_location | dead -----------+---------------+------ node1 | dbname=node1 | f node2 | dbname=node2 | f node3 | dbname=node3 | f node4 | dbname=node4 | f (4 rows) select * from pgq_node.create_node('aqueue', 'root', 'node1', 'node1_worker', null, null, null); ret_code | ret_note ----------+-------------------------------------------------------------- 200 | Node "node1" initialized for queue "aqueue" with type "root" (1 row) select * from pgq_node.register_subscriber('aqueue', 'node2', 'node2_worker', null); ret_code | ret_note | global_watermark ----------+------------------------------+------------------ 200 | Subscriber registered: node2 | 1 (1 row) select * from 
pgq_node.register_subscriber('aqueue', 'node3', 'node3_worker', null); ret_code | ret_note | global_watermark ----------+------------------------------+------------------ 200 | Subscriber registered: node3 | 1 (1 row) select * from pgq_node.maint_watermark('aqueue'); maint_watermark ----------------- 0 (1 row) select * from pgq_node.maint_watermark('aqueue-x'); maint_watermark ----------------- 0 (1 row) select * from pgq_node.get_consumer_info('aqueue'); consumer_name | provider_node | last_tick_id | paused | uptodate | cur_error ---------------+---------------+--------------+--------+----------+----------- node1_worker | node1 | 1 | f | f | (1 row) select * from pgq_node.unregister_subscriber('aqueue', 'node3'); ret_code | ret_note ----------+-------------------------------- 200 | Subscriber unregistered: node3 (1 row) select queue_name, consumer_name, last_tick from pgq.get_consumer_info(); queue_name | consumer_name | last_tick ------------+-------------------+----------- aqueue | .global_watermark | 1 aqueue | .node2.watermark | 1 aqueue | node2_worker | 1 (3 rows) select * from pgq_node.get_worker_state('aqueue'); ret_code | ret_note | node_type | node_name | completed_tick | provider_node | provider_location | paused | uptodate | cur_error | worker_name | global_watermark | local_watermark | local_queue_top | combined_queue | combined_type ----------+----------+-----------+-----------+----------------+---------------+-------------------+--------+----------+-----------+--------------+------------------+-----------------+-----------------+----------------+--------------- 100 | Ok | root | node1 | 1 | node1 | dbname=node1 | f | f | | node1_worker | 1 | 1 | 1 | | (1 row) update pgq.queue set queue_ticker_max_lag = '0', queue_ticker_idle_period = '0'; select * from pgq.ticker('aqueue'); ticker -------- 2 (1 row) select * from pgq.ticker('aqueue'); ticker -------- 3 (1 row) select * from pgq_node.set_subscriber_watermark('aqueue', 'node2', 3); ret_code | ret_note 
----------+--------------------------- 200 | .node2.watermark set to 3 (1 row) select queue_name, consumer_name, last_tick from pgq.get_consumer_info(); queue_name | consumer_name | last_tick ------------+-------------------+----------- aqueue | .global_watermark | 1 aqueue | .node2.watermark | 3 aqueue | node2_worker | 1 (3 rows) select * from pgq_node.set_node_attrs('aqueue', 'test=1'); ret_code | ret_note ----------+------------------------- 200 | Node attributes updated (1 row) select * from pgq_node.get_node_info('aqueue'); ret_code | ret_note | node_type | node_name | global_watermark | local_watermark | provider_node | provider_location | combined_queue | combined_type | worker_name | worker_paused | worker_uptodate | worker_last_tick | node_attrs ----------+----------+-----------+-----------+------------------+-----------------+---------------+-------------------+----------------+---------------+--------------+---------------+-----------------+------------------+------------ 100 | Ok | root | node1 | 1 | 1 | node1 | dbname=node1 | | | node1_worker | f | f | 3 | test=1 (1 row) select * from pgq_node.get_subscriber_info('aqueue'); node_name | worker_name | node_watermark -----------+--------------+---------------- node2 | node2_worker | 3 (1 row) -- branch node select * from pgq_node.register_location('bqueue', 'node1', 'dbname=node1', false); ret_code | ret_note ----------+--------------------- 200 | Location registered (1 row) select * from pgq_node.register_location('bqueue', 'node2', 'dbname=node2', false); ret_code | ret_note ----------+--------------------- 200 | Location registered (1 row) select * from pgq_node.register_location('bqueue', 'node3', 'dbname=node3', false); ret_code | ret_note ----------+--------------------- 200 | Location registered (1 row) select * from pgq_node.create_node('bqueue', 'branch', 'node2', 'node2_worker', 'node1', 1, null); ret_code | ret_note ----------+---------------------------------------------------------------- 200 
| Node "node2" initialized for queue "bqueue" with type "branch" (1 row) select * from pgq_node.register_consumer('bqueue', 'random_consumer', 'node1', 1); ret_code | ret_note ----------+----------------------------------------------------- 200 | Consumer random_consumer registered on queue bqueue (1 row) select * from pgq_node.register_consumer('bqueue', 'random_consumer2', 'node1', 1); ret_code | ret_note ----------+------------------------------------------------------ 200 | Consumer random_consumer2 registered on queue bqueue (1 row) select * from pgq_node.local_state; queue_name | consumer_name | provider_node | last_tick_id | cur_error | paused | uptodate ------------+------------------+---------------+--------------+-----------+--------+---------- aqueue | node1_worker | node1 | 1 | | f | f bqueue | node2_worker | node1 | 1 | | f | f bqueue | random_consumer | node1 | 1 | | f | f bqueue | random_consumer2 | node1 | 1 | | f | f (4 rows) select * from pgq_node.node_info; queue_name | node_type | node_name | worker_name | combined_queue | node_attrs ------------+-----------+-----------+--------------+----------------+------------ aqueue | root | node1 | node1_worker | | test=1 bqueue | branch | node2 | node2_worker | | (2 rows) select * from pgq_node.get_node_info('aqueue'); ret_code | ret_note | node_type | node_name | global_watermark | local_watermark | provider_node | provider_location | combined_queue | combined_type | worker_name | worker_paused | worker_uptodate | worker_last_tick | node_attrs ----------+----------+-----------+-----------+------------------+-----------------+---------------+-------------------+----------------+---------------+--------------+---------------+-----------------+------------------+------------ 100 | Ok | root | node1 | 1 | 1 | node1 | dbname=node1 | | | node1_worker | f | f | 3 | test=1 (1 row) select * from pgq_node.get_node_info('bqueue'); ret_code | ret_note | node_type | node_name | global_watermark | local_watermark | 
provider_node | provider_location | combined_queue | combined_type | worker_name | worker_paused | worker_uptodate | worker_last_tick | node_attrs ----------+----------+-----------+-----------+------------------+-----------------+---------------+-------------------+----------------+---------------+--------------+---------------+-----------------+------------------+------------ 100 | Ok | branch | node2 | 1 | 1 | node1 | dbname=node1 | | | node2_worker | f | f | 1 | (1 row) select * from pgq_node.get_node_info('cqueue'); ret_code | ret_note | node_type | node_name | global_watermark | local_watermark | provider_node | provider_location | combined_queue | combined_type | worker_name | worker_paused | worker_uptodate | worker_last_tick | node_attrs ----------+-----------------------+-----------+-----------+------------------+-----------------+---------------+-------------------+----------------+---------------+-------------+---------------+-----------------+------------------+------------ 404 | Unknown queue: cqueue | | | | | | | | | | | | | (1 row) select * from pgq_node.get_worker_state('aqueue'); ret_code | ret_note | node_type | node_name | completed_tick | provider_node | provider_location | paused | uptodate | cur_error | worker_name | global_watermark | local_watermark | local_queue_top | combined_queue | combined_type ----------+----------+-----------+-----------+----------------+---------------+-------------------+--------+----------+-----------+--------------+------------------+-----------------+-----------------+----------------+--------------- 100 | Ok | root | node1 | 1 | node1 | dbname=node1 | f | f | | node1_worker | 1 | 1 | 3 | | (1 row) select * from pgq_node.get_worker_state('bqueue'); ret_code | ret_note | node_type | node_name | completed_tick | provider_node | provider_location | paused | uptodate | cur_error | worker_name | global_watermark | local_watermark | local_queue_top | combined_queue | combined_type 
----------+----------+-----------+-----------+----------------+---------------+-------------------+--------+----------+-----------+--------------+------------------+-----------------+-----------------+----------------+--------------- 100 | Ok | branch | node2 | 1 | node1 | dbname=node1 | f | f | | node2_worker | 1 | 1 | 1 | | (1 row) select * from pgq_node.get_worker_state('cqueue'); ret_code | ret_note | node_type | node_name | completed_tick | provider_node | provider_location | paused | uptodate | cur_error | worker_name | global_watermark | local_watermark | local_queue_top | combined_queue | combined_type ----------+-----------------------+-----------+-----------+----------------+---------------+-------------------+--------+----------+-----------+-------------+------------------+-----------------+-----------------+----------------+--------------- 404 | Unknown queue: cqueue | | | | | | | | | | | | | | (1 row) select * from pgq_node.is_root_node('aqueue'); is_root_node -------------- t (1 row) select * from pgq_node.is_root_node('bqueue'); is_root_node -------------- f (1 row) select * from pgq_node.is_root_node('cqueue'); ERROR: queue does not exist: cqueue select * from pgq_node.get_consumer_state('bqueue', 'random_consumer'); ret_code | ret_note | node_type | node_name | completed_tick | provider_node | provider_location | paused | uptodate | cur_error ----------+----------+-----------+-----------+----------------+---------------+-------------------+--------+----------+----------- 100 | Ok | branch | node2 | 1 | node1 | dbname=node1 | f | f | (1 row) select * from pgq_node.get_consumer_state('bqueue', 'random_consumer2'); ret_code | ret_note | node_type | node_name | completed_tick | provider_node | provider_location | paused | uptodate | cur_error ----------+----------+-----------+-----------+----------------+---------------+-------------------+--------+----------+----------- 100 | Ok | branch | node2 | 1 | node1 | dbname=node1 | f | f | (1 row) select * 
from pgq_node.set_consumer_error('bqueue', 'random_consumer2', 'failure'); ret_code | ret_note ----------+------------------------------------------- 100 | Consumer random_consumer2 error = failure (1 row) select * from pgq_node.get_consumer_state('bqueue', 'random_consumer2'); ret_code | ret_note | node_type | node_name | completed_tick | provider_node | provider_location | paused | uptodate | cur_error ----------+----------+-----------+-----------+----------------+---------------+-------------------+--------+----------+----------- 100 | Ok | branch | node2 | 1 | node1 | dbname=node1 | f | f | failure (1 row) select * from pgq_node.set_consumer_completed('bqueue', 'random_consumer2', 2); ret_code | ret_note ----------+---------------------------------------------- 100 | Consumer random_consumer2 compleded tick = 2 (1 row) select * from pgq_node.get_consumer_state('bqueue', 'random_consumer2'); ret_code | ret_note | node_type | node_name | completed_tick | provider_node | provider_location | paused | uptodate | cur_error ----------+----------+-----------+-----------+----------------+---------------+-------------------+--------+----------+----------- 100 | Ok | branch | node2 | 2 | node1 | dbname=node1 | f | f | (1 row) select * from pgq_node.set_consumer_paused('bqueue', 'random_consumer2', true); ret_code | ret_note ----------+-------------------------------------------- 200 | Consumer random_consumer2 tagged as paused (1 row) select * from pgq_node.set_consumer_uptodate('bqueue', 'random_consumer2', true); ret_code | ret_note ----------+----------------------- 200 | Consumer uptodate = 1 (1 row) select * from pgq_node.change_consumer_provider('bqueue', 'random_consumer2', 'node3'); ret_code | ret_note ----------+--------------------------------------- 200 | Consumer provider node set to : node3 (1 row) select * from pgq_node.get_consumer_state('bqueue', 'random_consumer2'); ret_code | ret_note | node_type | node_name | completed_tick | provider_node | 
provider_location | paused | uptodate | cur_error ----------+----------+-----------+-----------+----------------+---------------+-------------------+--------+----------+----------- 100 | Ok | branch | node2 | 2 | node3 | dbname=node3 | t | f | (1 row) select * from pgq_node.unregister_consumer('bqueue', 'random_consumer2'); ret_code | ret_note ----------+---------------------------------------------------- 200 | Consumer random_consumer2 unregistered from bqueue (1 row) select * from pgq_node.get_consumer_state('bqueue', 'random_consumer2'); ret_code | ret_note | node_type | node_name | completed_tick | provider_node | provider_location | paused | uptodate | cur_error ----------+-------------------------------------------+-----------+-----------+----------------+---------------+-------------------+--------+----------+----------- 404 | Unknown consumer: bqueue/random_consumer2 | branch | node2 | | | | | | (1 row) select * from pgq_node.get_node_info('bqueue'); ret_code | ret_note | node_type | node_name | global_watermark | local_watermark | provider_node | provider_location | combined_queue | combined_type | worker_name | worker_paused | worker_uptodate | worker_last_tick | node_attrs ----------+----------+-----------+-----------+------------------+-----------------+---------------+-------------------+----------------+---------------+--------------+---------------+-----------------+------------------+------------ 100 | Ok | branch | node2 | 1 | 1 | node1 | dbname=node1 | | | node2_worker | f | f | 1 | (1 row) set session_replication_role = 'replica'; select * from pgq_node.demote_root('aqueue', 1, 'node3'); ret_code | ret_note | last_tick ----------+--------------------------------------+----------- 200 | Step 1: Writing disabled for: aqueue | (1 row) select * from pgq_node.demote_root('aqueue', 1, 'node3'); ret_code | ret_note | last_tick ----------+--------------------------------------+----------- 200 | Step 1: Writing disabled for: aqueue | (1 row) select * 
from pgq_node.demote_root('aqueue', 2, 'node3'); ret_code | ret_note | last_tick ----------+------------------------------------+----------- 200 | Step 2: Inserted last tick: aqueue | 4 (1 row) select * from pgq_node.demote_root('aqueue', 2, 'node3'); ret_code | ret_note | last_tick ----------+------------------------------------+----------- 200 | Step 2: Inserted last tick: aqueue | 5 (1 row) select * from pgq_node.demote_root('aqueue', 3, 'node3'); ret_code | ret_note | last_tick ----------+----------------------------------------+----------- 200 | Step 3: Demoted root to branch: aqueue | 5 (1 row) select * from pgq_node.demote_root('aqueue', 3, 'node3'); ret_code | ret_note | last_tick ----------+---------------+----------- 301 | Node not root | (1 row) -- leaf node select * from pgq_node.register_location('mqueue', 'node1', 'dbname=node1', false); ret_code | ret_note ----------+--------------------- 200 | Location registered (1 row) select * from pgq_node.register_location('mqueue', 'node2', 'dbname=node2', false); ret_code | ret_note ----------+--------------------- 200 | Location registered (1 row) select * from pgq_node.register_location('mqueue', 'node3', 'dbname=node3', false); ret_code | ret_note ----------+--------------------- 200 | Location registered (1 row) select * from pgq_node.create_node('mqueue', 'leaf', 'node2', 'node2_worker', 'node1', 13, 'aqueue'); ret_code | ret_note ----------+-------------------------------------------------------------- 200 | Node "node2" initialized for queue "mqueue" with type "leaf" (1 row) select * from pgq_node.get_worker_state('mqueue'); ret_code | ret_note | node_type | node_name | completed_tick | provider_node | provider_location | paused | uptodate | cur_error | worker_name | global_watermark | local_watermark | local_queue_top | combined_queue | combined_type 
----------+----------+-----------+-----------+----------------+---------------+-------------------+--------+----------+-----------+--------------+------------------+-----------------+-----------------+----------------+--------------- 100 | Ok | leaf | node2 | 13 | node1 | dbname=node1 | f | f | | node2_worker | | 13 | | aqueue | branch (1 row) select * from pgq_node.drop_node('asd', 'asd'); ret_code | ret_note ----------+------------------- 200 | Node dropped: asd (1 row) select * from pgq_node.drop_node('mqueue', 'node3'); ret_code | ret_note ----------+--------------------- 200 | Node dropped: node3 (1 row) select * from pgq_node.drop_node('mqueue', 'node2'); ret_code | ret_note ----------+--------------------- 200 | Node dropped: node2 (1 row) select * from pgq_node.drop_node('mqueue', 'node1'); ret_code | ret_note ----------+--------------------- 200 | Node dropped: node1 (1 row) select * from pgq_node.drop_node('aqueue', 'node5'); ret_code | ret_note ----------+--------------------- 200 | Node dropped: node5 (1 row) select * from pgq_node.drop_node('aqueue', 'node4'); ret_code | ret_note ----------+--------------------- 200 | Node dropped: node4 (1 row) select * from pgq_node.drop_node('aqueue', 'node1'); ret_code | ret_note ----------+--------------------- 200 | Node dropped: node1 (1 row) select * from pgq_node.drop_node('aqueue', 'node2'); ret_code | ret_note ----------+--------------------- 200 | Node dropped: node2 (1 row) select * from pgq_node.drop_node('aqueue', 'node3'); ret_code | ret_note ----------+--------------------- 200 | Node dropped: node3 (1 row) \q skytools-3.2.6/sql/pgq_node/pgq_node.control0000644000000000000000000000023312426435645016101 0ustar # pgq_node comment = 'Cascaded queue infrastructure' default_version = '3.2.5' relocatable = false superuser = true schema = 'pg_catalog' requires = 'pgq' skytools-3.2.6/sql/pgq_node/docs/0000755000000000000000000000000012426435645013635 5ustar 
skytools-3.2.6/sql/pgq_node/docs/Topics.txt0000644000000000000000000000650512426435645015645 0ustar Format: 1.52 # This is the Natural Docs topics file for this project. If you change anything # here, it will apply to THIS PROJECT ONLY. If you'd like to change something # for all your projects, edit the Topics.txt in Natural Docs' Config directory # instead. # If you'd like to prevent keywords from being recognized by Natural Docs, you # can do it like this: # Ignore Keywords: [keyword], [keyword], ... # # Or you can use the list syntax like how they are defined: # Ignore Keywords: # [keyword] # [keyword], [plural keyword] # ... #------------------------------------------------------------------------------- # SYNTAX: # # Topic Type: [name] # Alter Topic Type: [name] # Creates a new topic type or alters one from the main file. Each type gets # its own index and behavior settings. Its name can have letters, numbers, # spaces, and these charaters: - / . ' # # Plural: [name] # Sets the plural name of the topic type, if different. # # Keywords: # [keyword] # [keyword], [plural keyword] # ... # Defines or adds to the list of keywords for the topic type. They may only # contain letters, numbers, and spaces and are not case sensitive. Plural # keywords are used for list topics. You can redefine keywords found in the # main topics file. # # Index: [yes|no] # Whether the topics get their own index. Defaults to yes. Everything is # included in the general index regardless of this setting. # # Scope: [normal|start|end|always global] # How the topics affects scope. Defaults to normal. # normal - Topics stay within the current scope. # start - Topics start a new scope for all the topics beneath it, # like class topics. # end - Topics reset the scope back to global for all the topics # beneath it. # always global - Topics are defined as global, but do not change the scope # for any other topics. # # Class Hierarchy: [yes|no] # Whether the topics are part of the class hierarchy. 
Defaults to no. # # Page Title If First: [yes|no] # Whether the topic's title becomes the page title if it's the first one in # a file. Defaults to no. # # Break Lists: [yes|no] # Whether list topics should be broken into individual topics in the output. # Defaults to no. # # Can Group With: [type], [type], ... # Defines a list of topic types that this one can possibly be grouped with. # Defaults to none. #------------------------------------------------------------------------------- # The following topics are defined in the main file, if you'd like to alter # their behavior or add keywords: # # Generic, Class, Interface, Section, File, Group, Function, Variable, # Property, Type, Constant, Enumeration, Event, Delegate, Macro, # Database, Database Table, Database View, Database Index, Database # Cursor, Database Trigger, Cookie, Build Target # If you add something that you think would be useful to other developers # and should be included in Natural Docs by default, please e-mail it to # topics [at] naturaldocs [dot] org. Topic Type: Schema Plural: Schemas Index: No Scope: Start Class Hierarchy: Yes Keywords: schema, schemas Alter Topic Type: Function Add Keywords: public function internal function Alter Topic Type: File Index: No skytools-3.2.6/sql/pgq_node/docs/Languages.txt0000644000000000000000000001202112426435645016300 0ustar Format: 1.52 # This is the Natural Docs languages file for this project. If you change # anything here, it will apply to THIS PROJECT ONLY. If you'd like to change # something for all your projects, edit the Languages.txt in Natural Docs' # Config directory instead. Ignore Extension: sql #------------------------------------------------------------------------------- # SYNTAX: # # Unlike other Natural Docs configuration files, in this file all comments # MUST be alone on a line. Some languages deal with the # character, so you # cannot put comments on the same line as content. 
# # Also, all lists are separated with spaces, not commas, again because some # languages may need to use them. # # Language: [name] # Alter Language: [name] # Defines a new language or alters an existing one. Its name can use any # characters. If any of the properties below have an add/replace form, you # must use that when using Alter Language. # # The language Shebang Script is special. It's entry is only used for # extensions, and files with those extensions have their shebang (#!) lines # read to determine the real language of the file. Extensionless files are # always treated this way. # # The language Text File is also special. It's treated as one big comment # so you can put Natural Docs content in them without special symbols. Also, # if you don't specify a package separator, ignored prefixes, or enum value # behavior, it will copy those settings from the language that is used most # in the source tree. # # Extensions: [extension] [extension] ... # [Add/Replace] Extensions: [extension] [extension] ... # Defines the file extensions of the language's source files. You can # redefine extensions found in the main languages file. You can use * to # mean any undefined extension. # # Shebang Strings: [string] [string] ... # [Add/Replace] Shebang Strings: [string] [string] ... # Defines a list of strings that can appear in the shebang (#!) line to # designate that it's part of the language. You can redefine strings found # in the main languages file. # # Ignore Prefixes in Index: [prefix] [prefix] ... # [Add/Replace] Ignored Prefixes in Index: [prefix] [prefix] ... # # Ignore [Topic Type] Prefixes in Index: [prefix] [prefix] ... # [Add/Replace] Ignored [Topic Type] Prefixes in Index: [prefix] [prefix] ... # Specifies prefixes that should be ignored when sorting symbols in an # index. Can be specified in general or for a specific topic type. 
# #------------------------------------------------------------------------------ # For basic language support only: # # Line Comments: [symbol] [symbol] ... # Defines a space-separated list of symbols that are used for line comments, # if any. # # Block Comments: [opening sym] [closing sym] [opening sym] [closing sym] ... # Defines a space-separated list of symbol pairs that are used for block # comments, if any. # # Package Separator: [symbol] # Defines the default package separator symbol. The default is a dot. # # [Topic Type] Prototype Enders: [symbol] [symbol] ... # When defined, Natural Docs will attempt to get a prototype from the code # immediately following the topic type. It stops when it reaches one of # these symbols. Use \n for line breaks. # # Line Extender: [symbol] # Defines the symbol that allows a prototype to span multiple lines if # normally a line break would end it. # # Enum Values: [global|under type|under parent] # Defines how enum values are referenced. The default is global. # global - Values are always global, referenced as 'value'. # under type - Values are under the enum type, referenced as # 'package.enum.value'. # under parent - Values are under the enum's parent, referenced as # 'package.value'. # # Perl Package: [perl package] # Specifies the Perl package used to fine-tune the language behavior in ways # too complex to do in this file. # #------------------------------------------------------------------------------ # For full language support only: # # Full Language Support: [perl package] # Specifies the Perl package that has the parsing routines necessary for full # language support. 
# #------------------------------------------------------------------------------- # The following languages are defined in the main file, if you'd like to alter # them: # # Text File, Shebang Script, C/C++, C#, Java, JavaScript, Perl, Python, # PHP, SQL, Visual Basic, Pascal, Assembly, Ada, Tcl, Ruby, Makefile, # ActionScript, ColdFusion, R, Fortran # If you add a language that you think would be useful to other developers # and should be included in Natural Docs by default, please e-mail it to # languages [at] naturaldocs [dot] org. Language: PLPGSQL Extension: sql Line Comment: -- Block Comment: /* */ Enum Values: Global Function Prototype Enders: , ; ) $ ' Variable Prototype Enders: , ; ) := default Default DEFAULT Database Index Prototype Enders: , ; ) Database Trigger Prototype Enders: begin Begin BEGIN skytools-3.2.6/sql/pgq_node/docs/Menu.txt0000644000000000000000000000370612426435645015310 0ustar Format: 1.52 # You can add a title and sub-title to your menu like this: # Title: [project name] # SubTitle: [subtitle] # You can add a footer to your documentation like this: # Footer: [text] # If you want to add a copyright notice, this would be the place to do it. # You can add a timestamp to your documentation like one of these: # Timestamp: Generated on month day, year # Timestamp: Updated mm/dd/yyyy # Timestamp: Last updated mon day # # m - One or two digit month. January is "1" # mm - Always two digit month. January is "01" # mon - Short month word. January is "Jan" # month - Long month word. January is "January" # d - One or two digit day. 1 is "1" # dd - Always two digit day. 1 is "01" # day - Day with letter extension. 1 is "1st" # yy - Two digit year. 2006 is "06" # yyyy - Four digit year. 2006 is "2006" # year - Four digit year. 2006 is "2006" # -------------------------------------------------------------------------- # # Cut and paste the lines below to change the order in which your files # appear on the menu. 
Don't worry about adding or removing files, Natural # Docs will take care of that. # # You can further organize the menu by grouping the entries. Add a # "Group: [name] {" line to start a group, and add a "}" to end it. # # You can add text and web links to the menu by adding "Text: [text]" and # "Link: [name] ([URL])" lines, respectively. # # The formatting and comments are auto-generated, so don't worry about # neatness when editing the file. Natural Docs will clean it up the next # time it is run. When working with groups, just deal with the braces and # forget about the indentation and comments. # # -------------------------------------------------------------------------- File: Functions (functions.sql) File: Tables (pgq_node.sql) Group: Index { Index: Everything Database Table Index: Database Tables Function Index: Functions } # Group: Index skytools-3.2.6/sql/pgq_node/structure/0000755000000000000000000000000012426435645014745 5ustar skytools-3.2.6/sql/pgq_node/structure/ext_postproc.sql0000644000000000000000000000051012426435645020213 0ustar -- tag data objects as dumpable SELECT pg_catalog.pg_extension_config_dump('pgq_node.node_location', ''); SELECT pg_catalog.pg_extension_config_dump('pgq_node.node_info', ''); SELECT pg_catalog.pg_extension_config_dump('pgq_node.local_state', ''); SELECT pg_catalog.pg_extension_config_dump('pgq_node.subscriber_info', ''); skytools-3.2.6/sql/pgq_node/structure/tables.sql0000644000000000000000000001176412426435645016751 0ustar -- ---------------------------------------------------------------------- -- File: Tables -- -- Schema 'pgq_node', contains tables for cascaded pgq. -- -- Event types for cascaded queue: -- pgq.location-info - ev_data: node_name, extra1: queue_name, extra2: location, extra3: dead -- It contains updated node connect string. -- -- pgq.global-watermark - ev_data: tick_id, extra1: queue_name -- Root node sends minimal tick_id that must be kept. 
-- -- pgq.tick-id - ev_data: tick_id, extra1: queue_name -- Partition node inserts its tick-id into combined queue. -- -- ---------------------------------------------------------------------- create schema pgq_node; -- ---------------------------------------------------------------------- -- Table: pgq_node.node_location -- -- Static table that just lists all members in set. -- -- Columns: -- queue_name - cascaded queue name -- node_name - node name -- node_location - libpq connect string for connecting to node -- dead - whether the node is offline -- ---------------------------------------------------------------------- create table pgq_node.node_location ( queue_name text not null, node_name text not null, node_location text not null, dead boolean not null default false, primary key (queue_name, node_name) ); -- ---------------------------------------------------------------------- -- Table: pgq_node.node_info -- -- Local node info. -- -- Columns: -- queue_name - cascaded queue name -- node_type - local node type -- node_name - local node name -- worker_name - consumer name that maintains this node -- combined_queue - on 'leaf' the target combined set name -- node_attrs - urlencoded fields for worker -- -- Node types: -- root - data + batches is generated here -- branch - replicates full queue contents and maybe contains some tables -- leaf - does not replicate queue / or uses combined queue for that -- ---------------------------------------------------------------------- create table pgq_node.node_info ( queue_name text not null primary key, node_type text not null, node_name text not null, worker_name text, combined_queue text, node_attrs text, foreign key (queue_name, node_name) references pgq_node.node_location, check (node_type in ('root', 'branch', 'leaf')), check (case when node_type = 'root' then (worker_name is not null and combined_queue is null) when node_type = 'branch' then (worker_name is not null and combined_queue is null) when node_type = 
'leaf' then (worker_name is not null) else false end) ); -- ---------------------------------------------------------------------- -- Table: pgq_node.local_state -- -- All cascaded consumers (both worker and non-worker) -- keep their state here. -- -- Columns: -- queue_name - cascaded queue name -- consumer_name - cascaded consumer name -- provider_node - node name the consumer reads from -- last_tick_id - last committed tick id on this node -- cur_error - reason why current batch failed -- paused - whether consumer should wait -- uptodate - if consumer has seen new state -- ---------------------------------------------------------------------- create table pgq_node.local_state ( queue_name text not null, consumer_name text not null, provider_node text not null, last_tick_id bigint not null, cur_error text, paused boolean not null default false, uptodate boolean not null default false, primary key (queue_name, consumer_name), foreign key (queue_name) references pgq_node.node_info, foreign key (queue_name, provider_node) references pgq_node.node_location ); -- ---------------------------------------------------------------------- -- Table: pgq_node.subscriber_info -- -- List of nodes that subscribe to local node. -- -- Columns: -- queue_name - cascaded queue name -- subscriber_node - node name that uses this node as provider. 
-- worker_name - consumer name that maintains remote node -- ---------------------------------------------------------------------- create table pgq_node.subscriber_info ( queue_name text not null, subscriber_node text not null, worker_name text not null, watermark_name text not null, primary key (queue_name, subscriber_node), foreign key (queue_name) references pgq_node.node_info, foreign key (queue_name, subscriber_node) references pgq_node.node_location, foreign key (worker_name) references pgq.consumer (co_name), foreign key (watermark_name) references pgq.consumer (co_name) ); skytools-3.2.6/sql/pgq_node/structure/ext_unpackaged.sql0000644000000000000000000000043112426435645020446 0ustar ALTER EXTENSION pgq_node ADD SCHEMA pgq_node; ALTER EXTENSION pgq_node ADD TABLE pgq_node.node_location; ALTER EXTENSION pgq_node ADD TABLE pgq_node.node_info; ALTER EXTENSION pgq_node ADD TABLE pgq_node.local_state; ALTER EXTENSION pgq_node ADD TABLE pgq_node.subscriber_info; skytools-3.2.6/sql/pgq_node/structure/upgrade.sql0000644000000000000000000000003312426435645017111 0ustar \i structure/functions.sql skytools-3.2.6/sql/pgq_node/structure/grants.ini0000644000000000000000000000434712426435645016754 0ustar [GrantFu] roles = pgq_writer, pgq_admin, pgq_reader, public [1.public.fns] on.functions = %(pgq_node_public_fns)s public = execute # cascaded consumer, target side [2.consumer.fns] on.functions = %(pgq_node_consumer_fns)s pgq_writer = execute pgq_admin = execute # cascaded worker, target side [3.worker.fns] on.functions = %(pgq_node_worker_fns)s pgq_admin = execute # cascaded consumer/worker, source side [4.remote.fns] on.functions = %(pgq_node_remote_fns)s pgq_reader = execute pgq_writer = execute pgq_admin = execute # called by ticker, upgrade script [4.admin.fns] on.functions = %(pgq_node_admin_fns)s pgq_admin = execute [5.tables] pgq_reader = select pgq_writer = select pgq_admin = select, insert, update, delete on.tables = pgq_node.node_location, pgq_node.node_info, 
pgq_node.local_state, pgq_node.subscriber_info # define various groups of functions [DEFAULT] pgq_node_remote_fns = pgq_node.get_consumer_info(text), pgq_node.get_consumer_state(text, text), pgq_node.get_queue_locations(text), pgq_node.get_node_info(text), pgq_node.get_subscriber_info(text), pgq_node.register_subscriber(text, text, text, int8), pgq_node.unregister_subscriber(text, text), pgq_node.set_subscriber_watermark(text, text, bigint) pgq_node_public_fns = pgq_node.is_root_node(text), pgq_node.is_leaf_node(text), pgq_node.version() pgq_node_admin_fns = pgq_node.register_location(text, text, text, boolean), pgq_node.unregister_location(text, text), pgq_node.upgrade_schema(), pgq_node.maint_watermark(text) pgq_node_consumer_fns = pgq_node.register_consumer(text, text, text, int8), pgq_node.unregister_consumer(text, text), pgq_node.change_consumer_provider(text, text, text), pgq_node.set_consumer_uptodate(text, text, boolean), pgq_node.set_consumer_paused(text, text, boolean), pgq_node.set_consumer_completed(text, text, int8), pgq_node.set_consumer_error(text, text, text) pgq_node_worker_fns = pgq_node.create_node(text, text, text, text, text, bigint, text), pgq_node.drop_node(text, text), pgq_node.demote_root(text, int4, text), pgq_node.promote_branch(text), pgq_node.set_node_attrs(text, text), pgq_node.get_worker_state(text), pgq_node.set_global_watermark(text, bigint), pgq_node.set_partition_watermark(text, text, bigint) skytools-3.2.6/sql/pgq_node/structure/functions.sql0000644000000000000000000000636712426435645017512 0ustar -- File: Functions -- -- Database functions for cascaded pgq. -- -- Cascaded consumer flow: -- -- - (1) [target] call pgq_node.get_consumer_state() -- - (2) If .paused is true, sleep, go to (1). -- This is allows to control consumer remotely. -- - (3) If .uptodate is false, call pgq_node.set_consumer_uptodate(true). -- This allows remote controller to know that consumer has seen the changes. -- - (4) [source] call pgq.next_batch(). 
If returns NULL, sleep, goto (1) -- - (5) [source] if batch already done, call pgq.finish_batch(), go to (1) -- - (6) [source] read events -- - (7) [target] process events, call pgq_node.set_consumer_completed() in same tx. -- - (8) [source] call pgq.finish_batch() -- -- Cascaded worker flow: -- -- Worker is consumer that also copies to queue contents to local node (branch), -- so it can act as provider to other nodes. There can be only one worker per -- node. Or zero if node is leaf. In addition to cascaded consumer logic above, it must - -- - [branch] copy all events to local queue and create ticks -- - [merge-leaf] copy all events to combined-queue -- - [branch] publish local watermark upwards to provider so it reaches root. -- - [branch] apply global watermark event to local node -- - [merge-leaf] wait-behind on combined-branch (failover combined-root). -- It's last_tick_id is set by combined-branch worker, it must call -- pgq.next_batch()+pgq.finish_batch() without processing events -- when behind, but not move further. When the combined-branch -- becomes root, it will be in right position to continue updating. 
-- \i functions/pgq_node.upgrade_schema.sql select pgq_node.upgrade_schema(); -- Group: Global Node Map \i functions/pgq_node.register_location.sql \i functions/pgq_node.unregister_location.sql \i functions/pgq_node.get_queue_locations.sql -- Group: Node operations \i functions/pgq_node.create_node.sql \i functions/pgq_node.drop_node.sql -- \i functions/pgq_node.rename_node.sql \i functions/pgq_node.get_node_info.sql \i functions/pgq_node.is_root_node.sql \i functions/pgq_node.is_leaf_node.sql \i functions/pgq_node.get_subscriber_info.sql \i functions/pgq_node.get_consumer_info.sql \i functions/pgq_node.demote_root.sql \i functions/pgq_node.promote_branch.sql \i functions/pgq_node.set_node_attrs.sql -- Group: Provider side operations - worker \i functions/pgq_node.register_subscriber.sql \i functions/pgq_node.unregister_subscriber.sql \i functions/pgq_node.set_subscriber_watermark.sql -- Group: Subscriber side operations - worker \i functions/pgq_node.get_worker_state.sql \i functions/pgq_node.set_global_watermark.sql \i functions/pgq_node.set_partition_watermark.sql -- Group: Subscriber side operations - any consumer \i functions/pgq_node.register_consumer.sql \i functions/pgq_node.unregister_consumer.sql \i functions/pgq_node.get_consumer_state.sql \i functions/pgq_node.change_consumer_provider.sql \i functions/pgq_node.set_consumer_uptodate.sql \i functions/pgq_node.set_consumer_paused.sql \i functions/pgq_node.set_consumer_completed.sql \i functions/pgq_node.set_consumer_error.sql -- Group: Maintenance operations \i functions/pgq_node.maint_watermark.sql \i functions/pgq_node.version.sql skytools-3.2.6/sql/pgq_node/structure/grants.sql0000644000000000000000000000005412426435645016763 0ustar grant usage on schema pgq_node to public; skytools-3.2.6/sql/pgq_node/structure/install.sql0000644000000000000000000000011512426435645017131 0ustar \i structure/tables.sql \i structure/functions.sql \i structure/grants.sql 
skytools-3.2.6/sql/Makefile0000644000000000000000000000050112426435645012545 0ustar include ../config.mak SUBDIRS = londiste pgq pgq_coop pgq_ext pgq_node ticker txid all install clean distclean installcheck test: for dir in $(SUBDIRS); do \ $(MAKE) -C $$dir $@ \ DESTDIR="$(DESTDIR)" \ PG_CONFIG="$(PG_CONFIG)" \ PG_CPPFLAGS="$(PG_CPPFLAGS)" \ PYTHON="$(PYTHON)" \ || exit $?; \ done skytools-3.2.6/sql/logtriga/0000755000000000000000000000000012426435645012721 5ustar skytools-3.2.6/sql/logtriga/README.logtriga0000644000000000000000000000245412426435645015415 0ustar logtriga - generic table changes logger ======================================= logtriga provides generic table changes logging trigger. It prepares partial SQL statement about a change and gives it to user query. Usage ----- CREATE TRIGGER foo_log AFTER INSERT OR UPDATE OR DELETE ON foo_tbl FOR EACH ROW EXECUTE PROCEDURE logtriga(column_types, query); Where column_types is a string where each charater defines type of that column. Known types: * k - one of primary key columns for table. * v - data column * i - uninteresting column, to be ignored. Trigger function prepares 2 string arguments for query and executes it. * $1 - Operation type: I/U/D. * $2 - Partial SQL for event playback. * INSERT INTO FOO_TBL (field, list) values (val1, val2) * UPDATE FOO_TBL SET field1 = val1, field2 = val2 where key1 = kval1 * DELETE FROM FOO_TBL WHERE key1 = keyval1 The upper-case part is left out. Example ------- Following query emulates Slony-I behaviour: insert into SL_SCHEMA.sl_log_1 (log_origin, log_xid, log_tableid, log_actionseq, log_cmdtype, log_cmddata) values (CLUSTER_IDENT, SL_SCHEMA.getCurrentXid(), TABLE_OID, nextval('SL_SCHEMA.sl_action_seq'), $1, $2) The upper-case strings should be replaced with actual values on trigger creation. 
skytools-3.2.6/sql/logtriga/textbuf.c0000644000000000000000000001664612426435645014563 0ustar #include #include "utils/elog.h" #include "funcapi.h" #include "mb/pg_wchar.h" #include "parser/keywords.h" #if 1 #define talloc(len) malloc(len) #define trealloc(p, len) realloc(p, len) #define tfree(p) free(p) #else #define talloc(len) palloc(len) #define trealloc(p, len) repalloc(p, len) #define tfree(p) pfree(p) #endif #include "textbuf.h" #ifndef SET_VARSIZE #define SET_VARSIZE(x, len) VARATT_SIZEP(x) = (len) #endif struct TBuf { text *data; int size; }; static void request_avail(TBuf *tbuf, int len) { int newlen = tbuf->size; int need = VARSIZE(tbuf->data) + len; if (need < newlen) return; while (need > newlen) newlen *= 2; tbuf->data = trealloc(tbuf->data, newlen); tbuf->size = newlen; } static inline char *get_endp(TBuf *tbuf) { char *p = VARDATA(tbuf->data); int len = VARSIZE(tbuf->data) - VARHDRSZ; return p + len; } static inline void inc_used(TBuf *tbuf, int len) { SET_VARSIZE(tbuf->data, VARSIZE(tbuf->data) + len); } static void tbuf_init(TBuf *tbuf, int start_size) { if (start_size < VARHDRSZ) start_size = VARHDRSZ; tbuf->data = talloc(start_size); tbuf->size = start_size; SET_VARSIZE(tbuf->data, VARHDRSZ); } TBuf *tbuf_alloc(int start_size) { TBuf *res; res = talloc(sizeof(TBuf)); tbuf_init(res, start_size); return res; } void tbuf_free(TBuf *tbuf) { if (tbuf->data) tfree(tbuf->data); tfree(tbuf); } int tbuf_get_size(TBuf *tbuf) { return VARSIZE(tbuf->data) - VARHDRSZ; } void tbuf_reset(TBuf *tbuf) { SET_VARSIZE(tbuf->data, VARHDRSZ); } const text *tbuf_look_text(TBuf *tbuf) { return tbuf->data; } const char *tbuf_look_cstring(TBuf *tbuf) { char *p; request_avail(tbuf, 1); p = get_endp(tbuf); *p = 0; return VARDATA(tbuf->data); } void tbuf_append_cstring(TBuf *tbuf, const char *str) { int len = strlen(str); request_avail(tbuf, len); memcpy(get_endp(tbuf), str, len); inc_used(tbuf, len); } void tbuf_append_text(TBuf *tbuf, const text *str) { int len = 
VARSIZE(str) - VARHDRSZ; request_avail(tbuf, len); memcpy(get_endp(tbuf), VARDATA(str), len); inc_used(tbuf, len); } void tbuf_append_char(TBuf *tbuf, char chr) { char *p; request_avail(tbuf, 1); p = get_endp(tbuf); *p = chr; inc_used(tbuf, 1); } text *tbuf_steal_text(TBuf *tbuf) { text *data = tbuf->data; tbuf->data = NULL; return data; } static const char b64tbl[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; static int b64encode(char *dst, const uint8 *src, int srclen) { char *p = dst; const uint8 *s = src, *end = src + srclen; int pos = 2; uint32 buf = 0; while (s < end) { buf |= (unsigned char) *s << (pos << 3); pos--; s++; /* write it out */ if (pos < 0) { *p++ = b64tbl[ (buf >> 18) & 0x3f ]; *p++ = b64tbl[ (buf >> 12) & 0x3f ]; *p++ = b64tbl[ (buf >> 6) & 0x3f ]; *p++ = b64tbl[ buf & 0x3f ]; pos = 2; buf = 0; } } if (pos != 2) { *p++ = b64tbl[ (buf >> 18) & 0x3f ]; *p++ = b64tbl[ (buf >> 12) & 0x3f ]; *p++ = (pos == 0) ? b64tbl[ (buf >> 6) & 0x3f ] : '='; *p++ = '='; } return p - dst; } static const char hextbl[] = "0123456789abcdef"; static int urlencode(char *dst, const uint8 *src, int srclen) { const uint8 *end = src + srclen; char *p = dst; while (src < end) { if (*src == '=') *p++ = '+'; else if ((*src >= '0' && *src <= '9') || (*src >= 'A' && *src <= 'Z') || (*src >= 'a' && *src <= 'z')) *p++ = *src; else { *p++ = '%'; *p++ = hextbl[*src >> 4]; *p++ = hextbl[*src & 15]; } } return p - dst; } static int quote_literal(char *dst, const uint8 *src, int srclen) { const uint8 *cp1; char *cp2; int wl; bool is_ext = false; cp1 = src; cp2 = dst; *cp2++ = '\''; while (srclen > 0) { if ((wl = pg_mblen((const char *)cp1)) != 1) { if (wl > srclen) wl = srclen; srclen -= wl; while (wl-- > 0) *cp2++ = *cp1++; continue; } if (*cp1 == '\'') { *cp2++ = '\''; } else if (*cp1 == '\\') { if (!is_ext) { memmove(dst + 1, dst, cp2 - dst); cp2++; is_ext = true; *dst = 'E'; } *cp2++ = '\\'; } *cp2++ = *cp1++; srclen--; } *cp2++ = '\''; return cp2 - dst; 
} /* check if ident is keyword that needs quoting */ static bool is_keyword(const char *ident) { const ScanKeyword *kw; /* do the lookup */ #if PG_VERSION_NUM >= 80500 kw = ScanKeywordLookup(ident, ScanKeywords, NumScanKeywords); #else kw = ScanKeywordLookup(ident); #endif /* unreserved? */ #if PG_VERSION_NUM >= 80300 if (kw && kw->category == UNRESERVED_KEYWORD) return false; #endif /* found anything? */ return kw != NULL; } /* * slon_quote_identifier - Quote an identifier only if needed * * When quotes are needed, we palloc the required space; slightly * space-wasteful but well worth it for notational simplicity. * * Version: pgsql/src/backend/utils/adt/ruleutils.c,v 1.188 2005/01/13 17:19:10 */ static int quote_ident(char *dst, const uint8 *src, int srclen) { /* * Can avoid quoting if ident starts with a lowercase letter or * underscore and contains only lowercase letters, digits, and * underscores, *and* is not any SQL keyword. Otherwise, supply * quotes. */ int nquotes = 0; bool safe; const char *ptr; char *optr; char ident[NAMEDATALEN + 1]; /* expect idents be not bigger than NAMEDATALEN */ if (srclen > NAMEDATALEN) srclen = NAMEDATALEN; memcpy(ident, src, srclen); ident[srclen] = 0; /* * would like to use macros here, but they might yield * unwanted locale-specific results... 
*/ safe = ((ident[0] >= 'a' && ident[0] <= 'z') || ident[0] == '_'); for (ptr = ident; *ptr; ptr++) { char ch = *ptr; if ((ch >= 'a' && ch <= 'z') || (ch >= '0' && ch <= '9') || (ch == '_')) continue; /* okay */ safe = false; if (ch == '"') nquotes++; } if (safe) { if (is_keyword(ident)) safe = false; } optr = dst; if (!safe) *optr++ = '"'; for (ptr = ident; *ptr; ptr++) { char ch = *ptr; if (ch == '"') *optr++ = '"'; *optr++ = ch; } if (!safe) *optr++ = '"'; return optr - dst; } void tbuf_encode_cstring(TBuf *tbuf, const char *str, const char *encoding) { if (str == NULL) elog(ERROR, "tbuf_encode_cstring: NULL"); tbuf_encode_data(tbuf, (const uint8 *)str, strlen(str), encoding); } void tbuf_encode_data(TBuf *tbuf, const uint8 *data, int len, const char *encoding) { int dlen = 0; char *dst; if (strcmp(encoding, "url") == 0) { request_avail(tbuf, len*3); dst = get_endp(tbuf); dlen = urlencode(dst, data, len); } else if (strcmp(encoding, "base64") == 0) { request_avail(tbuf, (len + 2) * 4 / 3); dst = get_endp(tbuf); dlen = b64encode(dst, data, len); } else if (strcmp(encoding, "quote_literal") == 0) { request_avail(tbuf, len * 2 + 2); dst = get_endp(tbuf); dlen = quote_literal(dst, data, len); } else if (strcmp(encoding, "quote_ident") == 0) { request_avail(tbuf, len * 2 + 2); dst = get_endp(tbuf); dlen = quote_ident(dst, data, len); } else elog(ERROR, "bad encoding"); inc_used(tbuf, dlen); } skytools-3.2.6/sql/logtriga/logtriga.c0000644000000000000000000002656612426435645014714 0ustar /* ---------------------------------------------------------------------- * logtriga.c * * Generic trigger for logging table changes. * Based on Slony-I log trigger. * Does not depend on event storage. * * Copyright (c) 2003-2006, PostgreSQL Global Development Group * Author: Jan Wieck, Afilias USA INC. * * Generalized by Marko Kreen. 
* ---------------------------------------------------------------------- */ #include "postgres.h" #include "executor/spi.h" #include "commands/trigger.h" #include "catalog/pg_operator.h" #include "catalog/pg_type.h" #include "utils/typcache.h" #include "utils/rel.h" #include "textbuf.h" PG_FUNCTION_INFO_V1(logtriga); Datum logtriga(PG_FUNCTION_ARGS); #ifdef PG_MODULE_MAGIC PG_MODULE_MAGIC; #endif /* * There may be several plans to be cached. * * FIXME: plans are kept in singe-linked list * so not very fast access. Probably they should be * handled more intelligently. */ typedef struct PlanCache PlanCache; struct PlanCache { PlanCache *next; char *query; void *plan; }; /* * Cache result allocations. */ typedef struct ArgCache { TBuf *op_type; TBuf *op_data; } ArgCache; static PlanCache *plan_cache = NULL; static ArgCache *arg_cache = NULL; /* * Cache helpers */ static void *get_plan(const char *query) { PlanCache *c; void *plan; Oid plan_types[2]; for (c = plan_cache; c; c = c->next) if (strcmp(query, c->query) == 0) return c->plan; /* * Plan not cached, prepare new plan then. 
*/ plan_types[0] = TEXTOID; plan_types[1] = TEXTOID; plan = SPI_saveplan(SPI_prepare(query, 2, plan_types)); if (plan == NULL) elog(ERROR, "logtriga: SPI_prepare() failed"); /* create cache object */ c = malloc(sizeof(*c)); if (!c) elog(ERROR, "logtriga: no memory for plan cache"); c->plan = plan; c->query = strdup(query); /* insert at start */ c->next = plan_cache; plan_cache = c; return plan; } static ArgCache * get_arg_cache(void) { if (arg_cache == NULL) { ArgCache *a = malloc(sizeof(*a)); if (!a) elog(ERROR, "logtriga: no memory"); memset(a, 0, sizeof(*a)); a->op_type = tbuf_alloc(8); a->op_data = tbuf_alloc(8192); arg_cache = a; } return arg_cache; } static void append_key_eq(TBuf *tbuf, const char *col_ident, const char *col_value) { if (col_value == NULL) elog(ERROR, "logtriga: Unexpected NULL key value"); tbuf_encode_cstring(tbuf, col_ident, "quote_ident"); tbuf_append_char(tbuf, '='); tbuf_encode_cstring(tbuf, col_value, "quote_literal"); } static void append_normal_eq(TBuf *tbuf, const char *col_ident, const char *col_value) { tbuf_encode_cstring(tbuf, col_ident, "quote_ident"); tbuf_append_char(tbuf, '='); if (col_value != NULL) tbuf_encode_cstring(tbuf, col_value, "quote_literal"); else tbuf_append_cstring(tbuf, "NULL"); } static void process_insert(ArgCache *cs, TriggerData *tg, char *attkind) { HeapTuple new_row = tg->tg_trigtuple; TupleDesc tupdesc = tg->tg_relation->rd_att; int i; int need_comma = false; int attkind_idx; /* * INSERT * * op_type = 'I' op_data = ("non-NULL-col" [, ...]) values ('value' [, * ...]) */ tbuf_append_cstring(cs->op_type, "I"); /* * Specify all the columns */ tbuf_append_char(cs->op_data, '('); attkind_idx = -1; for (i = 0; i < tg->tg_relation->rd_att->natts; i++) { char *col_ident; /* Skip dropped columns */ if (tupdesc->attrs[i]->attisdropped) continue; /* Check if allowed by colstring */ attkind_idx++; if (attkind[attkind_idx] == '\0') break; if (attkind[attkind_idx] == 'i') continue; if (need_comma) 
tbuf_append_char(cs->op_data, ','); else need_comma = true; /* quote column name */ col_ident = SPI_fname(tupdesc, i + 1); tbuf_encode_cstring(cs->op_data, col_ident, "quote_ident"); } /* * Append the string ") values (" */ tbuf_append_cstring(cs->op_data, ") values ("); /* * Append the values */ need_comma = false; attkind_idx = -1; for (i = 0; i < tg->tg_relation->rd_att->natts; i++) { char *col_value; /* Skip dropped columns */ if (tupdesc->attrs[i]->attisdropped) continue; /* Check if allowed by colstring */ attkind_idx++; if (attkind[attkind_idx] == '\0') break; if (attkind[attkind_idx] == 'i') continue; if (need_comma) tbuf_append_char(cs->op_data, ','); else need_comma = true; /* quote column value */ col_value = SPI_getvalue(new_row, tupdesc, i + 1); if (col_value == NULL) tbuf_append_cstring(cs->op_data, "null"); else tbuf_encode_cstring(cs->op_data, col_value, "quote_literal"); } /* * Terminate and done */ tbuf_append_char(cs->op_data, ')'); } static int process_update(ArgCache *cs, TriggerData *tg, char *attkind) { HeapTuple old_row = tg->tg_trigtuple; HeapTuple new_row = tg->tg_newtuple; TupleDesc tupdesc = tg->tg_relation->rd_att; Datum old_value; Datum new_value; bool old_isnull; bool new_isnull; char *col_ident; char *col_value; int i; int need_comma = false; int need_and = false; int attkind_idx; int ignore_count = 0; /* * UPDATE * * op_type = 'U' op_data = "col_ident"='value' [, ...] where "pk_ident" = * 'value' [ and ...] 
*/ tbuf_append_cstring(cs->op_type, "U"); attkind_idx = -1; for (i = 0; i < tg->tg_relation->rd_att->natts; i++) { /* * Ignore dropped columns */ if (tupdesc->attrs[i]->attisdropped) continue; attkind_idx++; if (attkind[attkind_idx] == '\0') break; old_value = SPI_getbinval(old_row, tupdesc, i + 1, &old_isnull); new_value = SPI_getbinval(new_row, tupdesc, i + 1, &new_isnull); /* * If old and new value are NULL, the column is unchanged */ if (old_isnull && new_isnull) continue; /* * If both are NOT NULL, we need to compare the values and skip * setting the column if equal */ if (!old_isnull && !new_isnull) { Oid opr_oid; FmgrInfo *opr_finfo_p; /* * Lookup the equal operators function call info using the * typecache if available */ TypeCacheEntry *type_cache; type_cache = lookup_type_cache(SPI_gettypeid(tupdesc, i + 1), TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO); opr_oid = type_cache->eq_opr; if (opr_oid == ARRAY_EQ_OP) opr_oid = InvalidOid; else opr_finfo_p = &(type_cache->eq_opr_finfo); /* * If we have an equal operator, use that to do binary * comparision. Else get the string representation of both * attributes and do string comparision. */ if (OidIsValid(opr_oid)) { if (DatumGetBool(FunctionCall2(opr_finfo_p, old_value, new_value))) continue; } else { char *old_strval = SPI_getvalue(old_row, tupdesc, i + 1); char *new_strval = SPI_getvalue(new_row, tupdesc, i + 1); if (strcmp(old_strval, new_strval) == 0) continue; } } if (attkind[attkind_idx] == 'i') { /* this change should be ignored */ ignore_count++; continue; } if (need_comma) tbuf_append_char(cs->op_data, ','); else need_comma = true; col_ident = SPI_fname(tupdesc, i + 1); col_value = SPI_getvalue(new_row, tupdesc, i + 1); append_normal_eq(cs->op_data, col_ident, col_value); } /* * It can happen that the only UPDATE an application does is to set a * column to the same value again. In that case, we'd end up here with * no columns in the SET clause yet. 
We add the first key column here * with it's old value to simulate the same for the replication * engine. */ if (!need_comma) { /* there was change in ignored columns, skip whole event */ if (ignore_count > 0) return 0; for (i = 0, attkind_idx = -1; i < tg->tg_relation->rd_att->natts; i++) { if (tupdesc->attrs[i]->attisdropped) continue; attkind_idx++; if (attkind[attkind_idx] == 'k') break; } col_ident = SPI_fname(tupdesc, i + 1); col_value = SPI_getvalue(old_row, tupdesc, i + 1); append_key_eq(cs->op_data, col_ident, col_value); } tbuf_append_cstring(cs->op_data, " where "); for (i = 0, attkind_idx = -1; i < tg->tg_relation->rd_att->natts; i++) { /* * Ignore dropped columns */ if (tupdesc->attrs[i]->attisdropped) continue; attkind_idx++; if (attkind[attkind_idx] == '\0') break; if (attkind[attkind_idx] != 'k') continue; col_ident = SPI_fname(tupdesc, i + 1); col_value = SPI_getvalue(old_row, tupdesc, i + 1); if (need_and) tbuf_append_cstring(cs->op_data, " and "); else need_and = true; append_key_eq(cs->op_data, col_ident, col_value); } return 1; } static void process_delete(ArgCache *cs, TriggerData *tg, char *attkind) { HeapTuple old_row = tg->tg_trigtuple; TupleDesc tupdesc = tg->tg_relation->rd_att; char *col_ident; char *col_value; int i; int need_and = false; int attkind_idx; /* * DELETE * * op_type = 'D' op_data = "pk_ident"='value' [and ...] 
*/ tbuf_append_cstring(cs->op_type, "D"); for (i = 0, attkind_idx = -1; i < tg->tg_relation->rd_att->natts; i++) { if (tupdesc->attrs[i]->attisdropped) continue; attkind_idx++; if (attkind[attkind_idx] == '\0') break; if (attkind[attkind_idx] != 'k') continue; col_ident = SPI_fname(tupdesc, i + 1); col_value = SPI_getvalue(old_row, tupdesc, i + 1); if (need_and) tbuf_append_cstring(cs->op_data, " and "); else need_and = true; append_key_eq(cs->op_data, col_ident, col_value); } } Datum logtriga(PG_FUNCTION_ARGS) { TriggerData *tg; Datum argv[2]; int rc; ArgCache *cs; TupleDesc tupdesc; int i; int attcnt; char *attkind; char *kpos; char *query; int need_event = 1; /* * Get the trigger call context */ if (!CALLED_AS_TRIGGER(fcinfo)) elog(ERROR, "logtriga not called as trigger"); tg = (TriggerData *) (fcinfo->context); tupdesc = tg->tg_relation->rd_att; /* * Check all logTrigger() calling conventions */ if (!TRIGGER_FIRED_AFTER(tg->tg_event)) elog(ERROR, "logtriga must be fired AFTER"); if (!TRIGGER_FIRED_FOR_ROW(tg->tg_event)) elog(ERROR, "logtriga must be fired FOR EACH ROW"); if (tg->tg_trigger->tgnargs != 2) elog(ERROR, "logtriga must be defined with 2 args"); /* * Connect to the SPI manager */ if ((rc = SPI_connect()) < 0) elog(ERROR, "logtriga: SPI_connect() failed"); cs = get_arg_cache(); tbuf_reset(cs->op_type); tbuf_reset(cs->op_data); /* * Get all the trigger arguments */ attkind = tg->tg_trigger->tgargs[0]; query = tg->tg_trigger->tgargs[1]; /* * Count number of active columns */ for (i = 0, attcnt = 0; i < tg->tg_relation->rd_att->natts; i++) { if (tupdesc->attrs[i]->attisdropped) continue; attcnt++; } /* * Make sure all 'k' columns exist and there is at least one of them. 
*/ kpos = strrchr(attkind, 'k'); if (kpos == NULL) elog(ERROR, "logtriga: need at least one key column"); if (kpos - attkind >= attcnt) elog(ERROR, "logtriga: key column does not exist"); /* * Determine cmdtype and op_data depending on the command type */ if (TRIGGER_FIRED_BY_INSERT(tg->tg_event)) process_insert(cs, tg, attkind); else if (TRIGGER_FIRED_BY_UPDATE(tg->tg_event)) need_event = process_update(cs, tg, attkind); else if (TRIGGER_FIRED_BY_DELETE(tg->tg_event)) process_delete(cs, tg, attkind); else elog(ERROR, "logtriga fired for unhandled event"); /* * Construct the parameter array and insert the log row. */ if (need_event) { argv[0] = PointerGetDatum(tbuf_look_text(cs->op_type)); argv[1] = PointerGetDatum(tbuf_look_text(cs->op_data)); SPI_execp(get_plan(query), argv, NULL, 0); } SPI_finish(); return PointerGetDatum(NULL); } skytools-3.2.6/sql/logtriga/Makefile0000644000000000000000000000041412426435645014360 0ustar MODULE_big = logtriga SRCS = logtriga.c textbuf.c OBJS = $(SRCS:.c=.o) DATA_built = logtriga.sql REGRESS = logtriga PG_CONFIG = pg_config PGXS = $(shell $(PG_CONFIG) --pgxs) include $(PGXS) test: install make installcheck || { less regression.diffs; exit 1; } skytools-3.2.6/sql/logtriga/sql/0000755000000000000000000000000012426435645013520 5ustar skytools-3.2.6/sql/logtriga/sql/logtriga.sql0000644000000000000000000000344212426435645016054 0ustar -- init \set ECHO none \i logtriga.sql \set ECHO all create table rtest ( id integer primary key, dat text ); create table clog ( id serial, op text, data text ); create trigger rtest_triga after insert or update or delete on rtest for each row execute procedure logtriga('kv', 'insert into clog (op, data) values ($1, $2)'); -- simple test insert into rtest values (1, 'value1'); update rtest set dat = 'value2'; delete from rtest; select * from clog; delete from clog; -- test new fields alter table rtest add column dat2 text; insert into rtest values (1, 'value1'); update rtest set dat = 'value2'; delete 
from rtest; select * from clog; delete from clog; -- test field rename alter table rtest alter column dat type integer using 0; insert into rtest values (1, '666', 'newdat'); update rtest set dat = 5; delete from rtest; select * from clog; delete from clog; -- test field ignore drop trigger rtest_triga on rtest; create trigger rtest_triga after insert or update or delete on rtest for each row execute procedure logtriga('kiv', 'insert into clog (op, data) values ($1, $2)'); insert into rtest values (1, '666', 'newdat'); update rtest set dat = 5, dat2 = 'newdat2'; update rtest set dat = 6; delete from rtest; select * from clog; delete from clog; -- test wrong key drop trigger rtest_triga on rtest; create trigger rtest_triga after insert or update or delete on rtest for each row execute procedure logtriga('vik', 'insert into clog (op, data) values ($1, $2)'); insert into rtest values (1, 0, 'non-null'); insert into rtest values (2, 0, NULL); update rtest set dat2 = 'non-null2' where id=1; update rtest set dat2 = NULL where id=1; update rtest set dat2 = 'new-nonnull' where id=2; delete from rtest where id=1; delete from rtest where id=2; select * from clog; delete from clog; skytools-3.2.6/sql/logtriga/textbuf.h0000644000000000000000000000114112426435645014550 0ustar struct TBuf; typedef struct TBuf TBuf; TBuf *tbuf_alloc(int start_size); void tbuf_free(TBuf *tbuf); int tbuf_get_size(TBuf *tbuf); void tbuf_reset(TBuf *tbuf); const text *tbuf_look_text(TBuf *tbuf); const char *tbuf_look_cstring(TBuf *tbuf); void tbuf_append_cstring(TBuf *tbuf, const char *str); void tbuf_append_text(TBuf *tbuf, const text *str); void tbuf_append_char(TBuf *tbuf, char chr); text *tbuf_steal_text(TBuf *tbuf); void tbuf_encode_cstring(TBuf *tbuf, const char *str, const char *encoding); void tbuf_encode_data(TBuf *tbuf, const uint8 *data, int len, const char *encoding); skytools-3.2.6/sql/logtriga/logtriga.sql.in0000644000000000000000000000033412426435645015657 0ustar -- usage: 
logtriga(flds, query) -- -- query should include 2 args: -- $1 - for op type I/U/D, -- $2 - for op data CREATE OR REPLACE FUNCTION logtriga() RETURNS trigger AS 'MODULE_PATHNAME', 'logtriga' LANGUAGE C; skytools-3.2.6/sql/logtriga/expected/0000755000000000000000000000000012426435645014522 5ustar skytools-3.2.6/sql/logtriga/expected/logtriga.out0000644000000000000000000000611712426435645017070 0ustar -- init \set ECHO none create table rtest ( id integer primary key, dat text ); NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "rtest_pkey" for table "rtest" create table clog ( id serial, op text, data text ); NOTICE: CREATE TABLE will create implicit sequence "clog_id_seq" for serial column "clog.id" create trigger rtest_triga after insert or update or delete on rtest for each row execute procedure logtriga('kv', 'insert into clog (op, data) values ($1, $2)'); -- simple test insert into rtest values (1, 'value1'); update rtest set dat = 'value2'; delete from rtest; select * from clog; delete from clog; id | op | data ----+----+-------------------------------- 1 | I | (id,dat) values ('1','value1') 2 | U | dat='value2' where id='1' 3 | D | id='1' (3 rows) -- test new fields alter table rtest add column dat2 text; insert into rtest values (1, 'value1'); update rtest set dat = 'value2'; delete from rtest; select * from clog; delete from clog; id | op | data ----+----+-------------------------------- 4 | I | (id,dat) values ('1','value1') 5 | U | dat='value2' where id='1' 6 | D | id='1' (3 rows) -- test field rename alter table rtest alter column dat type integer using 0; insert into rtest values (1, '666', 'newdat'); update rtest set dat = 5; delete from rtest; select * from clog; delete from clog; id | op | data ----+----+----------------------------- 7 | I | (id,dat) values ('1','666') 8 | U | dat='5' where id='1' 9 | D | id='1' (3 rows) -- test field ignore drop trigger rtest_triga on rtest; create trigger rtest_triga after insert or update or delete on 
rtest for each row execute procedure logtriga('kiv', 'insert into clog (op, data) values ($1, $2)'); insert into rtest values (1, '666', 'newdat'); update rtest set dat = 5, dat2 = 'newdat2'; update rtest set dat = 6; delete from rtest; select * from clog; delete from clog; id | op | data ----+----+--------------------------------- 10 | I | (id,dat2) values ('1','newdat') 11 | U | dat2='newdat2' where id='1' 12 | D | id='1' (3 rows) -- test wrong key drop trigger rtest_triga on rtest; create trigger rtest_triga after insert or update or delete on rtest for each row execute procedure logtriga('vik', 'insert into clog (op, data) values ($1, $2)'); insert into rtest values (1, 0, 'non-null'); insert into rtest values (2, 0, NULL); update rtest set dat2 = 'non-null2' where id=1; update rtest set dat2 = NULL where id=1; update rtest set dat2 = 'new-nonnull' where id=2; ERROR: logtriga: Unexpected NULL key value delete from rtest where id=1; ERROR: logtriga: Unexpected NULL key value delete from rtest where id=2; ERROR: logtriga: Unexpected NULL key value select * from clog; delete from clog; id | op | data ----+----+---------------------------------------- 13 | I | (id,dat2) values ('1','non-null') 14 | I | (id,dat2) values ('2',null) 15 | U | dat2='non-null2' where dat2='non-null' 16 | U | dat2=NULL where dat2='non-null2' (4 rows) skytools-3.2.6/sql/txid/0000755000000000000000000000000012426435645012061 5ustar skytools-3.2.6/sql/txid/txid.c0000644000000000000000000002360212426435645013200 0ustar /*------------------------------------------------------------------------- * txid.c * * Safe handling of transaction ID's. * * Copyright (c) 2003-2004, PostgreSQL Global Development Group * Author: Jan Wieck, Afilias USA INC. 
* * 64-bit output: Marko Kreen, Skype Technologies *------------------------------------------------------------------------- */ #include "postgres.h" #include #include "access/xact.h" #include "funcapi.h" #include "lib/stringinfo.h" #include "libpq/pqformat.h" #include "txid.h" #ifdef INT64_IS_BUSTED #error txid needs working int64 #endif #ifdef PG_MODULE_MAGIC PG_MODULE_MAGIC; #endif #ifndef SET_VARSIZE #define SET_VARSIZE(x, len) VARATT_SIZEP(x) = len #endif /* txid will be signed int8 in database, so must limit to 63 bits */ #define MAX_TXID UINT64CONST(0x7FFFFFFFFFFFFFFF) /* * If defined, use bsearch() function for searching * txid's inside snapshots that have more than given values. */ #define USE_BSEARCH_FOR 100 /* * public functions */ PG_FUNCTION_INFO_V1(txid_current); PG_FUNCTION_INFO_V1(txid_snapshot_in); PG_FUNCTION_INFO_V1(txid_snapshot_out); PG_FUNCTION_INFO_V1(txid_snapshot_recv); PG_FUNCTION_INFO_V1(txid_snapshot_send); PG_FUNCTION_INFO_V1(txid_current_snapshot); PG_FUNCTION_INFO_V1(txid_snapshot_xmin); PG_FUNCTION_INFO_V1(txid_snapshot_xmax); /* new API in 8.3 */ PG_FUNCTION_INFO_V1(txid_visible_in_snapshot); PG_FUNCTION_INFO_V1(txid_snapshot_xip); /* old API */ PG_FUNCTION_INFO_V1(txid_in_snapshot); PG_FUNCTION_INFO_V1(txid_not_in_snapshot); PG_FUNCTION_INFO_V1(txid_snapshot_active); /* * utility functions */ static int _cmp_txid(const void *aa, const void *bb) { const uint64 *a = aa; const uint64 *b = bb; if (*a < *b) return -1; if (*a > *b) return 1; return 0; } static void sort_snapshot(TxidSnapshot *snap) { qsort(snap->xip, snap->nxip, sizeof(txid), _cmp_txid); } static StringInfo buf_init(txid xmin, txid xmax) { TxidSnapshot snap; StringInfo buf; snap.xmin = xmin; snap.xmax = xmax; snap.nxip = 0; buf = makeStringInfo(); appendBinaryStringInfo(buf, (char *)&snap, offsetof(TxidSnapshot, xip)); return buf; } static void buf_add_txid(StringInfo buf, txid xid) { TxidSnapshot *snap = (TxidSnapshot *)buf->data; snap->nxip++; 
appendBinaryStringInfo(buf, (char *)&xid, sizeof(xid)); } static TxidSnapshot * buf_finalize(StringInfo buf) { TxidSnapshot *snap = (TxidSnapshot *)buf->data; SET_VARSIZE(snap, buf->len); /* buf is not needed anymore */ buf->data = NULL; pfree(buf); return snap; } static TxidSnapshot * parse_snapshot(const char *str) { txid xmin; txid xmax; txid last_val = 0, val; char *endp; StringInfo buf; xmin = (txid) strtoull(str, &endp, 0); if (*endp != ':') goto bad_format; str = endp + 1; xmax = (txid) strtoull(str, &endp, 0); if (*endp != ':') goto bad_format; str = endp + 1; /* it should look sane */ if (xmin >= xmax || xmin == 0 || xmax > MAX_INT64) goto bad_format; /* allocate buffer */ buf = buf_init(xmin, xmax); /* loop over values */ while (*str != '\0') { /* read next value */ val = (txid) strtoull(str, &endp, 0); str = endp; /* require the input to be in order */ if (val < xmin || val <= last_val || val >= xmax) goto bad_format; buf_add_txid(buf, val); last_val = val; if (*str == ',') str++; else if (*str != '\0') goto bad_format; } return buf_finalize(buf); bad_format: elog(ERROR, "illegal txid_snapshot input format"); return NULL; } /* * Public functions */ /* * txid_current - Return the current transaction ID as txid */ Datum txid_current(PG_FUNCTION_ARGS) { txid val; TxidEpoch state; txid_load_epoch(&state, 0); val = txid_convert_xid(GetTopTransactionId(), &state); PG_RETURN_INT64(val); } /* * txid_current_snapshot - return current snapshot */ Datum txid_current_snapshot(PG_FUNCTION_ARGS) { TxidSnapshot *snap; unsigned num, i, size; TxidEpoch state; if (SerializableSnapshot == NULL) elog(ERROR, "get_current_snapshot: SerializableSnapshot == NULL"); txid_load_epoch(&state, 1); num = SerializableSnapshot->xcnt; size = offsetof(TxidSnapshot, xip) + sizeof(txid) * num; snap = palloc(size); SET_VARSIZE(snap, size); snap->xmin = txid_convert_xid(SerializableSnapshot->xmin, &state); snap->xmax = txid_convert_xid(SerializableSnapshot->xmax, &state); snap->nxip = num; 
for (i = 0; i < num; i++) snap->xip[i] = txid_convert_xid(SerializableSnapshot->xip[i], &state); /* we want them guaranteed ascending order */ sort_snapshot(snap); PG_RETURN_POINTER(snap); } /* * txid_snapshot_in - input function for type txid_snapshot */ Datum txid_snapshot_in(PG_FUNCTION_ARGS) { TxidSnapshot *snap; char *str = PG_GETARG_CSTRING(0); snap = parse_snapshot(str); PG_RETURN_POINTER(snap); } /* * txid_snapshot_out - output function for type txid_snapshot */ Datum txid_snapshot_out(PG_FUNCTION_ARGS) { TxidSnapshot *snap; StringInfoData str; int i; snap = (TxidSnapshot *) PG_GETARG_VARLENA_P(0); initStringInfo(&str); appendStringInfo(&str, "%llu:", (unsigned long long)snap->xmin); appendStringInfo(&str, "%llu:", (unsigned long long)snap->xmax); for (i = 0; i < snap->nxip; i++) { appendStringInfo(&str, "%s%llu", ((i > 0) ? "," : ""), (unsigned long long)snap->xip[i]); } PG_FREE_IF_COPY(snap, 0); PG_RETURN_CSTRING(str.data); } /* * txid_snapshot_recv(internal) returns txid_snapshot * * binary input function for type txid_snapshot * * format: int4 nxip, int8 xmin, int8 xmax, int8 xip */ Datum txid_snapshot_recv(PG_FUNCTION_ARGS) { StringInfo buf = (StringInfo) PG_GETARG_POINTER(0); TxidSnapshot *snap; txid last = 0; int nxip; int i; int avail; int expect; txid xmin, xmax; /* * load nxip and check for nonsense. * * (nxip > avail) check is against int overflows in 'expect'. 
*/ nxip = pq_getmsgint(buf, 4); avail = buf->len - buf->cursor; expect = 8 + 8 + nxip * 8; if (nxip < 0 || nxip > avail || expect > avail) goto bad_format; xmin = pq_getmsgint64(buf); xmax = pq_getmsgint64(buf); if (xmin == 0 || xmax == 0 || xmin > xmax || xmax > MAX_TXID) goto bad_format; snap = palloc(TXID_SNAPSHOT_SIZE(nxip)); snap->xmin = xmin; snap->xmax = xmax; snap->nxip = nxip; SET_VARSIZE(snap, TXID_SNAPSHOT_SIZE(nxip)); for (i = 0; i < nxip; i++) { txid cur = pq_getmsgint64(buf); if (cur <= last || cur < xmin || cur >= xmax) goto bad_format; snap->xip[i] = cur; last = cur; } PG_RETURN_POINTER(snap); bad_format: elog(ERROR, "invalid snapshot data"); return (Datum)NULL; } /* * txid_snapshot_send(txid_snapshot) returns bytea * * binary output function for type txid_snapshot * * format: int4 nxip, int8 xmin, int8 xmax, int8 xip */ Datum txid_snapshot_send(PG_FUNCTION_ARGS) { TxidSnapshot *snap = (TxidSnapshot *)PG_GETARG_VARLENA_P(0); StringInfoData buf; uint32 i; pq_begintypsend(&buf); pq_sendint(&buf, snap->nxip, 4); pq_sendint64(&buf, snap->xmin); pq_sendint64(&buf, snap->xmax); for (i = 0; i < snap->nxip; i++) pq_sendint64(&buf, snap->xip[i]); PG_RETURN_BYTEA_P(pq_endtypsend(&buf)); } static int _txid_in_snapshot(txid value, const TxidSnapshot *snap) { if (value < snap->xmin) return true; else if (value >= snap->xmax) return false; #ifdef USE_BSEARCH_FOR else if (snap->nxip >= USE_BSEARCH_FOR) { void *res; res = bsearch(&value, snap->xip, snap->nxip, sizeof(txid), _cmp_txid); return (res) ? false : true; } #endif else { int i; for (i = 0; i < snap->nxip; i++) { if (value == snap->xip[i]) return false; } return true; } } /* * txid_in_snapshot - is txid visible in snapshot ? */ Datum txid_in_snapshot(PG_FUNCTION_ARGS) { txid value = PG_GETARG_INT64(0); TxidSnapshot *snap = (TxidSnapshot *) PG_GETARG_VARLENA_P(1); int res; res = _txid_in_snapshot(value, snap) ? 
true : false; PG_FREE_IF_COPY(snap, 1); PG_RETURN_BOOL(res); } /* * changed api */ Datum txid_visible_in_snapshot(PG_FUNCTION_ARGS) { txid value = PG_GETARG_INT64(0); TxidSnapshot *snap = (TxidSnapshot *) PG_GETARG_VARLENA_P(1); int res; res = _txid_in_snapshot(value, snap) ? true : false; PG_FREE_IF_COPY(snap, 1); PG_RETURN_BOOL(res); } /* * txid_not_in_snapshot - is txid invisible in snapshot ? */ Datum txid_not_in_snapshot(PG_FUNCTION_ARGS) { txid value = PG_GETARG_INT64(0); TxidSnapshot *snap = (TxidSnapshot *) PG_GETARG_VARLENA_P(1); int res; res = _txid_in_snapshot(value, snap) ? false : true; PG_FREE_IF_COPY(snap, 1); PG_RETURN_BOOL(res); } /* * txid_snapshot_xmin - return snapshot's xmin */ Datum txid_snapshot_xmin(PG_FUNCTION_ARGS) { TxidSnapshot *snap = (TxidSnapshot *) PG_GETARG_VARLENA_P(0); txid res = snap->xmin; PG_FREE_IF_COPY(snap, 0); PG_RETURN_INT64(res); } /* * txid_snapshot_xmin - return snapshot's xmax */ Datum txid_snapshot_xmax(PG_FUNCTION_ARGS) { TxidSnapshot *snap = (TxidSnapshot *) PG_GETARG_VARLENA_P(0); txid res = snap->xmax; PG_FREE_IF_COPY(snap, 0); PG_RETURN_INT64(res); } /* remember state between function calls */ struct snap_state { int pos; TxidSnapshot *snap; }; /* * txid_snapshot_active - returns uncommitted TXID's in snapshot. 
*/ Datum txid_snapshot_xip(PG_FUNCTION_ARGS) { FuncCallContext *fctx; struct snap_state *state; if (SRF_IS_FIRSTCALL()) { TxidSnapshot *snap; int statelen; snap = (TxidSnapshot *) PG_GETARG_VARLENA_P(0); fctx = SRF_FIRSTCALL_INIT(); statelen = sizeof(*state) + VARSIZE(snap); state = MemoryContextAlloc(fctx->multi_call_memory_ctx, statelen); state->pos = 0; state->snap = (TxidSnapshot *)((char *)state + sizeof(*state)); memcpy(state->snap, snap, VARSIZE(snap)); fctx->user_fctx = state; PG_FREE_IF_COPY(snap, 0); } fctx = SRF_PERCALL_SETUP(); state = fctx->user_fctx; if (state->pos < state->snap->nxip) { Datum res = Int64GetDatum(state->snap->xip[state->pos]); state->pos++; SRF_RETURN_NEXT(fctx, res); } else { SRF_RETURN_DONE(fctx); } } /* old api */ Datum txid_snapshot_active(PG_FUNCTION_ARGS) { return txid_snapshot_xip(fcinfo); } skytools-3.2.6/sql/txid/txid.h0000644000000000000000000000242612426435645013206 0ustar #ifndef _TXID_H_ #define _TXID_H_ #define MAX_INT64 0x7FFFFFFFFFFFFFFFLL /* Use unsigned variant internally */ typedef uint64 txid; typedef struct { int32 __varsz; /* should not be touched directly */ uint32 nxip; txid xmin; txid xmax; txid xip[1]; } TxidSnapshot; #define TXID_SNAPSHOT_SIZE(nxip) (offsetof(TxidSnapshot, xip) + sizeof(txid) * nxip) typedef struct { uint64 last_value; uint64 epoch; } TxidEpoch; /* internal functions */ void txid_load_epoch(TxidEpoch *state, int try_write); txid txid_convert_xid(TransactionId xid, TxidEpoch *state); /* public functions */ Datum txid_current(PG_FUNCTION_ARGS); Datum txid_current_snapshot(PG_FUNCTION_ARGS); Datum txid_snapshot_in(PG_FUNCTION_ARGS); Datum txid_snapshot_out(PG_FUNCTION_ARGS); Datum txid_snapshot_recv(PG_FUNCTION_ARGS); Datum txid_snapshot_send(PG_FUNCTION_ARGS); Datum txid_snapshot_xmin(PG_FUNCTION_ARGS); Datum txid_snapshot_xmax(PG_FUNCTION_ARGS); Datum txid_snapshot_xip(PG_FUNCTION_ARGS); Datum txid_visible_in_snapshot(PG_FUNCTION_ARGS); Datum txid_snapshot_active(PG_FUNCTION_ARGS); Datum 
txid_in_snapshot(PG_FUNCTION_ARGS); Datum txid_not_in_snapshot(PG_FUNCTION_ARGS); #endif /* _TXID_H_ */ skytools-3.2.6/sql/txid/epoch.c0000644000000000000000000001215312426435645013325 0ustar /*------------------------------------------------------------------------- * epoch.c * * Detect current epoch. *------------------------------------------------------------------------- */ #include "postgres.h" #include #include "access/transam.h" #include "executor/spi.h" #include "miscadmin.h" #include "catalog/pg_control.h" #include "access/xlog.h" #include "txid.h" /* * do a TransactionId -> txid conversion */ txid txid_convert_xid(TransactionId xid, TxidEpoch *state) { uint64 epoch; /* avoid issues with the the special meaning of 0 */ if (xid == InvalidTransactionId) return MAX_INT64; /* return special xid's as-is */ if (xid < FirstNormalTransactionId) return xid; /* xid can on both sides on wrap-around */ epoch = state->epoch; if (TransactionIdPrecedes(xid, state->last_value)) { if (xid > state->last_value) epoch--; } else if (TransactionIdFollows(xid, state->last_value)) { if (xid < state->last_value) epoch++; } return (epoch << 32) | xid; } #if PG_CONTROL_VERSION >= 820 /* * PostgreSQl 8.2 keeps track of epoch internally. */ void txid_load_epoch(TxidEpoch *state, int try_write) { TransactionId xid; uint32 epoch; GetNextXidAndEpoch(&xid, &epoch); state->epoch = epoch; state->last_value = xid; } #else /* * For older PostgreSQL keep epoch in table. */ /* * this caches the txid_epoch table. * The struct should be updated only together with the table. */ static TxidEpoch epoch_state = { 0, 0 }; /* * load values from txid_epoch table. 
*/ static int load_epoch(void) { HeapTuple row; TupleDesc rdesc; bool isnull = false; Datum tmp; int res; uint64 db_epoch, db_value; res = SPI_connect(); if (res < 0) elog(ERROR, "cannot connect to SPI"); res = SPI_execute("select epoch, last_value from txid.epoch", true, 0); if (res != SPI_OK_SELECT) elog(ERROR, "load_epoch: select failed?"); if (SPI_processed != 1) elog(ERROR, "load_epoch: there must be exactly 1 row"); row = SPI_tuptable->vals[0]; rdesc = SPI_tuptable->tupdesc; tmp = SPI_getbinval(row, rdesc, 1, &isnull); if (isnull) elog(ERROR, "load_epoch: epoch is NULL"); db_epoch = DatumGetInt64(tmp); tmp = SPI_getbinval(row, rdesc, 2, &isnull); if (isnull) elog(ERROR, "load_epoch: last_value is NULL"); db_value = DatumGetInt64(tmp); SPI_finish(); /* * If the db has lesser values, then some updates were lost. * * Should that be special-cased? ATM just use db values. * Thus immidiate update. */ epoch_state.epoch = db_epoch; epoch_state.last_value = db_value; return 1; } /* * updates last_value and epoch, if needed */ static void save_epoch(void) { int res; char qbuf[200]; uint64 new_epoch, new_value; TransactionId xid = GetTopTransactionId(); TransactionId old_value; /* store old state */ MemoryContext oldcontext = CurrentMemoryContext; ResourceOwner oldowner = CurrentResourceOwner; /* * avoid changing internal values. */ new_value = xid; new_epoch = epoch_state.epoch; old_value = (TransactionId)epoch_state.last_value; if (xid < old_value) { if (TransactionIdFollows(xid, old_value)) new_epoch++; else return; } sprintf(qbuf, "update txid.epoch set epoch = %llu, last_value = %llu", (unsigned long long)new_epoch, (unsigned long long)new_value); /* * The update may fail in case of SERIALIZABLE transaction. * Try to catch the error and hide it. 
*/ BeginInternalSubTransaction(NULL); PG_TRY(); { /* do the update */ res = SPI_connect(); if (res < 0) elog(ERROR, "cannot connect to SPI"); res = SPI_execute(qbuf, false, 0); SPI_finish(); ReleaseCurrentSubTransaction(); } PG_CATCH(); { /* we expect rollback to clean up inner SPI call */ RollbackAndReleaseCurrentSubTransaction(); FlushErrorState(); res = -1; /* remember failure */ } PG_END_TRY(); /* restore old state */ MemoryContextSwitchTo(oldcontext); CurrentResourceOwner = oldowner; if (res < 0) return; /* * Seems the update was successful, update internal state too. * * There is a chance that the TX will be rollbacked, but then * another backend will do the update, or this one at next * checkpoint. */ epoch_state.epoch = new_epoch; epoch_state.last_value = new_value; } static void check_epoch(int update_prio) { TransactionId xid = GetTopTransactionId(); TransactionId recheck, tx_next; int ok = 1; /* should not happen, but just in case */ if (xid == InvalidTransactionId) return; /* new backend */ if (epoch_state.last_value == 0) load_epoch(); /* try to avoid concurrent access */ if (update_prio) recheck = 50000 + 100 * (MyProcPid & 0x1FF); else recheck = 300000 + 1000 * (MyProcPid & 0x1FF); /* read table */ tx_next = (TransactionId)epoch_state.last_value + recheck; if (TransactionIdFollows(xid, tx_next)) ok = load_epoch(); /* * check if save is needed. last_value may be updated above. */ tx_next = (TransactionId)epoch_state.last_value + recheck; if (!ok || TransactionIdFollows(xid, tx_next)) save_epoch(); } void txid_load_epoch(TxidEpoch *state, int try_write) { check_epoch(try_write); state->epoch = epoch_state.epoch; state->last_value = epoch_state.last_value; } #endif skytools-3.2.6/sql/txid/README.txid0000644000000000000000000000321112426435645013705 0ustar txid - 8 byte transaction ID's ============================== Based on xxid module from Slony-I. The goal is to make PostgreSQL internal transaction ID and snapshot data usable externally. 
They cannot be used directly as the internal 4-byte value wraps around and thus breaks indexing. This module extends the internal value with wraparound cound (epoch). It uses relaxed method for wraparound check. There is a table txid.epoch (epoch, last_value) which is used to check if the xid is in current, next or previous epoch. It requires only occasional read-write access - ca. after 100k - 500k transactions. Also it contains type 'txid_snapshot' and following functions: txid_current() returns int8 Current transaction ID txid_current_snapshot() returns txid_snapshot Current snapshot txid_snapshot_xmin( snap ) returns int8 Smallest TXID in snapshot. TXID's smaller than this are all visible in snapshot. txid_snapshot_xmax( snap ) returns int8 Largest TXID in snapshot. TXID's starting from this one are all invisible in snapshot. txid_snapshot_xip( snap ) setof int8 List of uncommitted TXID's in snapshot, that are invisible in snapshot. Values are between xmin and xmax. txid_visible_in_snapshot(id, snap) returns bool Is TXID visible in snapshot? Problems -------- - it breaks when there are more than 2G tx'es between calls. Fixed in 8.2 - functions that create new txid's should be 'security definers' thus better protecting txid_epoch table. - After loading database from backup you should do: UPDATE txid.epoch SET epoch = epoch + 1, last_value = (get_current_txid() & 4294967295); skytools-3.2.6/sql/txid/txid.schema.sql0000644000000000000000000000210712426435645015011 0ustar -- ---------- -- txid.sql -- -- SQL script for loading the transaction ID compatible datatype -- -- Copyright (c) 2003-2004, PostgreSQL Global Development Group -- Author: Jan Wieck, Afilias USA INC. 
-- -- ---------- -- -- now the epoch storage -- CREATE SCHEMA txid; -- remember txid settings -- use bigint so we can do arithmetic with it create table txid.epoch ( epoch bigint, last_value bigint ); -- make sure there exist exactly one row insert into txid.epoch values (0, 1); -- then protect it create function txid.epoch_guard() returns trigger as $$ begin if TG_OP = 'UPDATE' then -- epoch: allow only small increase if NEW.epoch > OLD.epoch and NEW.epoch < (OLD.epoch + 3) then return NEW; end if; -- last_value: allow only increase if NEW.epoch = OLD.epoch and NEW.last_value > OLD.last_value then return NEW; end if; end if; raise exception 'bad operation on txid.epoch'; end; $$ language plpgsql; -- the trigger create trigger epoch_guard_trigger before insert or update or delete on txid.epoch for each row execute procedure txid.epoch_guard(); skytools-3.2.6/sql/txid/txid.std.sql0000644000000000000000000000534712426435645014354 0ustar -- ---------- -- txid.sql -- -- SQL script for loading the transaction ID compatible datatype -- -- Copyright (c) 2003-2004, PostgreSQL Global Development Group -- Author: Jan Wieck, Afilias USA INC. 
-- -- ---------- set client_min_messages = 'warning'; CREATE DOMAIN txid AS bigint CHECK (value > 0); -- -- A special transaction snapshot data type for faster visibility checks -- CREATE OR REPLACE FUNCTION txid_snapshot_in(cstring) RETURNS txid_snapshot AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; CREATE OR REPLACE FUNCTION txid_snapshot_out(txid_snapshot) RETURNS cstring AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; CREATE OR REPLACE FUNCTION txid_snapshot_recv(internal) RETURNS txid_snapshot AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; CREATE OR REPLACE FUNCTION txid_snapshot_send(txid_snapshot) RETURNS bytea AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; -- -- The data type itself -- CREATE TYPE txid_snapshot ( INPUT = txid_snapshot_in, OUTPUT = txid_snapshot_out, RECEIVE = txid_snapshot_recv, SEND = txid_snapshot_send, INTERNALLENGTH = variable, STORAGE = extended, ALIGNMENT = double ); --CREATE OR REPLACE FUNCTION get_current_txid() CREATE OR REPLACE FUNCTION txid_current() RETURNS bigint AS 'MODULE_PATHNAME', 'txid_current' LANGUAGE C STABLE SECURITY DEFINER; -- CREATE OR REPLACE FUNCTION get_current_snapshot() CREATE OR REPLACE FUNCTION txid_current_snapshot() RETURNS txid_snapshot AS 'MODULE_PATHNAME', 'txid_current_snapshot' LANGUAGE C STABLE SECURITY DEFINER; --CREATE OR REPLACE FUNCTION get_snapshot_xmin(txid_snapshot) CREATE OR REPLACE FUNCTION txid_snapshot_xmin(txid_snapshot) RETURNS bigint AS 'MODULE_PATHNAME', 'txid_snapshot_xmin' LANGUAGE C IMMUTABLE STRICT; -- CREATE OR REPLACE FUNCTION get_snapshot_xmax(txid_snapshot) CREATE OR REPLACE FUNCTION txid_snapshot_xmax(txid_snapshot) RETURNS bigint AS 'MODULE_PATHNAME', 'txid_snapshot_xmax' LANGUAGE C IMMUTABLE STRICT; -- CREATE OR REPLACE FUNCTION get_snapshot_active(txid_snapshot) CREATE OR REPLACE FUNCTION txid_snapshot_xip(txid_snapshot) RETURNS setof bigint AS 'MODULE_PATHNAME', 'txid_snapshot_xip' LANGUAGE C IMMUTABLE STRICT; -- -- Special comparision functions used by the 
remote worker -- for sync chunk selection -- CREATE OR REPLACE FUNCTION txid_visible_in_snapshot(bigint, txid_snapshot) RETURNS boolean AS 'MODULE_PATHNAME', 'txid_visible_in_snapshot' LANGUAGE C IMMUTABLE STRICT; /* CREATE OR REPLACE FUNCTION txid_in_snapshot(bigint, txid_snapshot) RETURNS boolean AS 'MODULE_PATHNAME', 'txid_in_snapshot' LANGUAGE C IMMUTABLE STRICT; CREATE OR REPLACE FUNCTION txid_not_in_snapshot(bigint, txid_snapshot) RETURNS boolean AS 'MODULE_PATHNAME', 'txid_not_in_snapshot' LANGUAGE C IMMUTABLE STRICT; */ skytools-3.2.6/sql/txid/Makefile0000644000000000000000000000226212426435645013523 0ustar PG_CONFIG = pg_config PGXS = $(shell $(PG_CONFIG) --pgxs) PGVER := $(shell $(PG_CONFIG) --version | sed 's/PostgreSQL //') ifeq ($(PGVER),) $(error Failed to get Postgres version) else # postgres >= manages epoch itself, so skip epoch tables pg83 = $(shell test $(PGVER) "<" "8.3" && echo "false" || echo "true") pg82 = $(shell test $(PGVER) "<" "8.2" && echo "false" || echo "true") endif ifeq ($(pg83),true) # we have 8.3 with internal txid # install empty txid.sql DATA_built = txid.sql include $(PGXS) txid.sql: txid.internal.sql cp $< $@ else # 8.2 or 8.1 # # pg < 8.3 needs this module # MODULE_big = txid SRCS = txid.c epoch.c OBJS = $(SRCS:.c=.o) REGRESS = txid REGRESS_OPTS = --load-language=plpgsql DATA = uninstall_txid.sql DOCS = README.txid DATA_built = txid.sql EXTRA_CLEAN = txid.sql.in # PGXS build procedure include $(PGXS) ifeq ($(pg82),true) # 8.2 tracks epoch internally TXID_SQL = txid.std.sql else # 8.1 needs epoch-tracking code TXID_SQL = txid.std.sql txid.schema.sql endif # ! 8.2 # additional deps txid.o: txid.h epoch.o: txid.h txid.sql.in: $(TXID_SQL) cat $(TXID_SQL) > $@ endif # ! 
8.3 test: install make installcheck || { less regression.diffs; exit 1; } skytools-3.2.6/sql/txid/sql/0000755000000000000000000000000012426435645012660 5ustar skytools-3.2.6/sql/txid/sql/txid.sql0000644000000000000000000000212612426435645014352 0ustar -- init \set ECHO none \i txid.sql \set ECHO all -- i/o select '12:13:'::txid_snapshot; select '12:13:1,2'::txid_snapshot; -- errors select '31:12:'::txid_snapshot; select '0:1:'::txid_snapshot; select '12:13:0'::txid_snapshot; select '12:13:2,1'::txid_snapshot; create table snapshot_test ( nr integer, snap txid_snapshot ); insert into snapshot_test values (1, '12:13:'); insert into snapshot_test values (2, '12:20:13,15,18'); insert into snapshot_test values (3, '100001:100009:100005,100007,100008'); select snap from snapshot_test order by nr; select txid_snapshot_xmin(snap), txid_snapshot_xmax(snap), txid_snapshot_xip(snap) from snapshot_test order by nr; select id, txid_visible_in_snapshot(id, snap) from snapshot_test, generate_series(11, 21) id where nr = 2; -- test current values also select txid_current() >= txid_snapshot_xmin(txid_current_snapshot()); -- select txid_current_txid() < txid_snapshot_xmax(txid_current_snapshot()); -- select txid_in_snapshot(txid_current_txid(), txid_current_snapshot()), -- txid_not_in_snapshot(txid_current_txid(), txid_current_snapshot()); skytools-3.2.6/sql/txid/expected/0000755000000000000000000000000012426435645013662 5ustar skytools-3.2.6/sql/txid/expected/txid.out0000644000000000000000000000420312426435645015362 0ustar -- init \set ECHO none -- i/o select '12:13:'::txid_snapshot; txid_snapshot --------------- 12:13: (1 row) select '12:13:1,2'::txid_snapshot; ERROR: illegal txid_snapshot input format -- errors select '31:12:'::txid_snapshot; ERROR: illegal txid_snapshot input format select '0:1:'::txid_snapshot; ERROR: illegal txid_snapshot input format select '12:13:0'::txid_snapshot; ERROR: illegal txid_snapshot input format select '12:13:2,1'::txid_snapshot; ERROR: illegal 
txid_snapshot input format create table snapshot_test ( nr integer, snap txid_snapshot ); insert into snapshot_test values (1, '12:13:'); insert into snapshot_test values (2, '12:20:13,15,18'); insert into snapshot_test values (3, '100001:100009:100005,100007,100008'); select snap from snapshot_test order by nr; snap ------------------------------------ 12:13: 12:20:13,15,18 100001:100009:100005,100007,100008 (3 rows) select txid_snapshot_xmin(snap), txid_snapshot_xmax(snap), txid_snapshot_xip(snap) from snapshot_test order by nr; txid_snapshot_xmin | txid_snapshot_xmax | txid_snapshot_xip --------------------+--------------------+------------------- 12 | 20 | 13 12 | 20 | 15 12 | 20 | 18 100001 | 100009 | 100005 100001 | 100009 | 100007 100001 | 100009 | 100008 (6 rows) select id, txid_visible_in_snapshot(id, snap) from snapshot_test, generate_series(11, 21) id where nr = 2; id | txid_visible_in_snapshot ----+-------------------------- 11 | t 12 | t 13 | f 14 | t 15 | f 16 | t 17 | t 18 | f 19 | t 20 | f 21 | f (11 rows) -- test current values also select txid_current() >= txid_snapshot_xmin(txid_current_snapshot()); ?column? 
---------- t (1 row) -- select txid_current_txid() < txid_snapshot_xmax(txid_current_snapshot()); -- select txid_in_snapshot(txid_current_txid(), txid_current_snapshot()), -- txid_not_in_snapshot(txid_current_txid(), txid_current_snapshot()); skytools-3.2.6/sql/txid/txid.internal.sql0000644000000000000000000000003312426435645015361 0ustar -- txid is included in 8.3 skytools-3.2.6/sql/txid/uninstall_txid.sql0000644000000000000000000000015612426435645015645 0ustar DROP DOMAIN txid; DROP TYPE txid_snapshot cascade; DROP SCHEMA txid CASCADE; DROP FUNCTION txid_current(); skytools-3.2.6/sql/ticker/0000755000000000000000000000000012426435645012372 5ustar skytools-3.2.6/sql/ticker/ticker.c0000644000000000000000000000640312426435645014022 0ustar #include "pgqd.h" static void run_pgq_check(struct PgDatabase *db) { const char *q = "select 1 from pg_catalog.pg_namespace where nspname='pgq'"; log_debug("%s: %s", db->name, q); pgs_send_query_simple(db->c_ticker, q); db->state = DB_TICKER_CHECK_PGQ; } static void run_version_check(struct PgDatabase *db) { const char *q = "select pgq.version()"; log_debug("%s: %s", db->name, q); pgs_send_query_simple(db->c_ticker, q); db->state = DB_TICKER_CHECK_VERSION; } static void run_ticker(struct PgDatabase *db) { const char *q = "select pgq.ticker()"; log_noise("%s: %s", db->name, q); pgs_send_query_simple(db->c_ticker, q); db->state = DB_TICKER_RUN; } static void close_ticker(struct PgDatabase *db, double sleep_time) { log_debug("%s: close_ticker, %f", db->name, sleep_time); db->state = DB_CLOSED; pgs_reconnect(db->c_ticker, sleep_time); } static void parse_pgq_check(struct PgDatabase *db, PGresult *res) { db->has_pgq = PQntuples(res) == 1; if (!db->has_pgq) { log_debug("%s: no pgq", db->name); close_ticker(db, cf.check_period); } else { run_version_check(db); } } static void parse_version_check(struct PgDatabase *db, PGresult *res) { char *ver; if (PQntuples(res) != 1) { log_debug("%s: calling pgq.version() failed", db->name); goto 
badpgq; } ver = PQgetvalue(res, 0, 0); if (ver[0] < '3') { log_debug("%s: bad pgq version: %s", db->name, ver); goto badpgq; } log_info("%s: pgq version ok: %s", db->name, ver); run_ticker(db); if (!db->c_maint) launch_maint(db); if (!db->c_retry) launch_retry(db); return; badpgq: db->has_pgq = false; log_info("%s: bad pgq version, ignoring", db->name); close_ticker(db, cf.check_period); } static void parse_ticker_result(struct PgDatabase *db, PGresult *res) { if (PQntuples(res) != 1) { log_debug("%s: calling pgq.ticker() failed", db->name); } else { stats.n_ticks++; } pgs_sleep(db->c_ticker, cf.ticker_period); } static void tick_handler(struct PgSocket *s, void *arg, enum PgEvent ev, PGresult *res) { struct PgDatabase *db = arg; ExecStatusType st; switch (ev) { case PGS_CONNECT_OK: run_pgq_check(db); break; case PGS_RESULT_OK: if (PQresultStatus(res) != PGRES_TUPLES_OK) { close_ticker(db, 10); break; } switch (db->state) { case DB_TICKER_CHECK_PGQ: parse_pgq_check(db, res); break; case DB_TICKER_CHECK_VERSION: parse_version_check(db, res); break; case DB_TICKER_RUN: parse_ticker_result(db, res); break; case DB_CLOSED: st = PQresultStatus(res); log_warning("%s: Weird state: RESULT_OK + DB_CLOSED (%s)", db->name, PQresStatus(st)); close_ticker(db, 10); break; default: log_warning("%s: bad state: %d", db->name, db->state); close_ticker(db, 10); } break; case PGS_TIMEOUT: log_noise("%s: tick timeout", db->name); if (!pgs_connection_valid(db->c_ticker)) launch_ticker(db); else run_ticker(db); break; default: log_warning("%s: default timeout", db->name); pgs_reconnect(db->c_ticker, 60); } } void launch_ticker(struct PgDatabase *db) { log_debug("%s: launch_ticker", db->name); if (!db->c_ticker) { const char *cstr = make_connstr(db->name); db->c_ticker = pgs_create(cstr, tick_handler, db); pgs_set_lifetime(db->c_ticker, cf.connection_lifetime); } pgs_connect(db->c_ticker); } skytools-3.2.6/sql/ticker/pgqd.h0000644000000000000000000000263512426435645013504 0ustar #ifndef 
__PGQD_H__ #define __PGQD_H__ #include #define Assert(x) #include #include #include #include enum DbState { DB_CLOSED, DB_TICKER_CHECK_PGQ, DB_TICKER_CHECK_VERSION, DB_TICKER_RUN, DB_MAINT_TEST_VERSION, DB_MAINT_LOAD_OPS, DB_MAINT_OP, DB_MAINT_LOAD_QUEUES, DB_MAINT_ROT1, DB_MAINT_ROT2, DB_MAINT_VACUUM_LIST, DB_MAINT_DO_VACUUM, }; struct MaintOp; struct PgDatabase { struct List head; const char *name; struct PgSocket *c_ticker; struct PgSocket *c_maint; struct PgSocket *c_retry; bool has_pgq; enum DbState state; enum DbState maint_state; bool dropped; struct StrList *maint_item_list; struct StatList maint_op_list; struct MaintOp *cur_maint; bool has_maint_operations; }; struct Config { const char *config_file; const char *pidfile; const char *base_connstr; const char *initial_database; const char *database_list; double retry_period; double check_period; double maint_period; double ticker_period; double stats_period; double connection_lifetime; }; struct Stats { int n_ticks; int n_maint; int n_retry; }; extern struct Config cf; extern struct Stats stats; void launch_ticker(struct PgDatabase *db); void launch_maint(struct PgDatabase *db); void launch_retry(struct PgDatabase *db); void free_maint(struct PgDatabase *db); const char *make_connstr(const char *dbname); #endif skytools-3.2.6/sql/ticker/pgqd.ini0000644000000000000000000000110312426435645014021 0ustar [pgqd] # where to log logfile = ~/log/pgqd.log # pidfile pidfile = ~/pid/pgqd.pid ## optional parameters ## # libpq connect string without dbname= #base_connstr = # startup db to query other databases #initial_database = template1 # limit ticker to specific databases #database_list = # log into syslog #syslog = 1 #syslog_ident = pgqd ## optional timeouts ## # how often to check for new databases #check_period = 60 # how often to flush retry queue #retry_period = 30 # how often to do maintentance #maint_period = 120 # how often to run ticker #ticker_period = 1 
skytools-3.2.6/sql/ticker/maint.c0000644000000000000000000001734412426435645013657 0ustar #include "pgqd.h" #include #include #include struct MaintOp { struct List head; const char *func_name; const char *func_arg; }; static struct MaintOp *next_op(struct PgDatabase *db) { struct List *el = statlist_pop(&db->maint_op_list); if (!el) return NULL; return container_of(el, struct MaintOp, head); } static void free_op(struct MaintOp *op) { if (op) { free(op->func_name); free(op->func_arg); free(op); } } void free_maint(struct PgDatabase *db) { struct MaintOp *op; strlist_free(db->maint_item_list); db->maint_item_list = NULL; while ((op = next_op(db)) != NULL) { free_op(op); } free_op(db->cur_maint); db->cur_maint = NULL; } static void close_maint(struct PgDatabase *db, double sleep_time) { log_debug("%s: close_maint, %f", db->name, sleep_time); db->maint_state = DB_CLOSED; pgs_reconnect(db->c_maint, sleep_time); } static void run_test_version(struct PgDatabase *db) { const char *q = "select 1 from pg_proc p, pg_namespace n" " where p.pronamespace = n.oid" " and p.proname = 'maint_operations'" " and n.nspname = 'pgq'"; log_debug("%s: %s", db->name, q); pgs_send_query_simple(db->c_maint, q); db->maint_state = DB_MAINT_TEST_VERSION; } static bool has_ops(PGresult *res) { if (PQntuples(res) == 1 && atoi(PQgetvalue(res, 0, 0)) == 1) return true; return false; } static bool fill_op_list(struct PgDatabase *db, PGresult *res) { int i; struct MaintOp *op = NULL; const char *fname, *farg; free_maint(db); for (i = 0; i < PQntuples(res); i++) { op = calloc(1, sizeof(*op)); if (!op) return false; list_init(&op->head); fname = PQgetvalue(res, i, 0); farg = NULL; if (!PQgetisnull(res, i, 1)) farg = PQgetvalue(res, i, 1); log_debug("load_op: %s / %s", fname, farg ? 
farg : "NULL"); op->func_name = strdup(fname); if (!op->func_name) goto failed; if (farg) { op->func_arg = strdup(farg); if (!op->func_arg) goto failed; } statlist_append(&db->maint_op_list, &op->head); } return true; failed: free_op(op); return false; } static void run_op_list(struct PgDatabase *db) { const char *q = "select func_name, func_arg from pgq.maint_operations()"; log_debug("%s: %s", db->name, q); pgs_send_query_simple(db->c_maint, q); db->maint_state = DB_MAINT_LOAD_OPS; } static const char *stmt_names[] = { "vacuum", "vacuum analyze", NULL }; static void run_op(struct PgDatabase *db, PGresult *res) { struct MaintOp *op; char buf[1024]; char namebuf[256]; const char **np; if (db->cur_maint) { if (res && PQntuples(res) > 0) { const char *val = PQgetvalue(res, 0, 0); if (val && atoi(val)) { op = db->cur_maint; goto repeat; } } next: free_op(db->cur_maint); db->cur_maint = NULL; } op = next_op(db); if (!op) { stats.n_maint++; close_maint(db, cf.maint_period); return; } db->cur_maint = op; repeat: /* check if its magic statement */ for (np = stmt_names; *np; np++) { if (strcasecmp(op->func_name, *np) != 0) continue; if (!pg_quote_fqident(namebuf, op->func_arg, sizeof(namebuf))) { log_error("Bad table name? - %s", op->func_arg); goto next; } /* run as a statement */ snprintf(buf, sizeof(buf), "%s %s", op->func_name, namebuf); log_debug("%s: [%s]", db->name, buf); pgs_send_query_simple(db->c_maint, buf); goto done; } /* run as a function */ if (!pg_quote_fqident(namebuf, op->func_name, sizeof(namebuf))) { log_error("Bad func name? 
- %s", op->func_name); goto next; } if (op->func_arg) { snprintf(buf, sizeof(buf), "select %s($1)", namebuf); log_debug("%s: [%s]", db->name, buf); pgs_send_query_params(db->c_maint, buf, 1, op->func_arg); } else { snprintf(buf, sizeof(buf), "select %s()", namebuf); log_debug("%s: [%s]", db->name, buf); pgs_send_query_simple(db->c_maint, buf); } done: db->maint_state = DB_MAINT_OP; } static bool fill_items(struct PgDatabase *db, PGresult *res) { int i; if (db->maint_item_list) strlist_free(db->maint_item_list); db->maint_item_list = strlist_new(USUAL_ALLOC); if (!db->maint_item_list) return false; for (i = 0; i < PQntuples(res); i++) { const char *item = PQgetvalue(res, i, 0); if (item) if (!strlist_append(db->maint_item_list, item)) return false; } return true; } static void run_queue_list(struct PgDatabase *db) { const char *q = "select queue_name from pgq.get_queue_info()"; log_debug("%s: %s", db->name, q); pgs_send_query_simple(db->c_maint, q); db->maint_state = DB_MAINT_LOAD_QUEUES; } static void run_vacuum_list(struct PgDatabase *db) { const char *q = "select * from pgq.maint_tables_to_vacuum()"; log_debug("%s: %s", db->name, q); pgs_send_query_simple(db->c_maint, q); db->maint_state = DB_MAINT_VACUUM_LIST; } static void run_rotate_part1(struct PgDatabase *db) { const char *q; const char *qname; qname = strlist_pop(db->maint_item_list); q = "select pgq.maint_rotate_tables_step1($1)"; log_debug("%s: %s [%s]", db->name, q, qname); pgs_send_query_params(db->c_maint, q, 1, qname); free(qname); db->maint_state = DB_MAINT_ROT1; } static void run_rotate_part2(struct PgDatabase *db) { const char *q = "select pgq.maint_rotate_tables_step2()"; log_debug("%s: %s", db->name, q); pgs_send_query_simple(db->c_maint, q); db->maint_state = DB_MAINT_ROT2; } static void run_vacuum(struct PgDatabase *db) { char qbuf[256]; const char *table; table = strlist_pop(db->maint_item_list); snprintf(qbuf, sizeof(qbuf), "vacuum %s", table); log_debug("%s: %s", db->name, qbuf); 
pgs_send_query_simple(db->c_maint, qbuf); free(table); db->maint_state = DB_MAINT_DO_VACUUM; } static void maint_handler(struct PgSocket *s, void *arg, enum PgEvent ev, PGresult *res) { struct PgDatabase *db = arg; switch (ev) { case PGS_CONNECT_OK: log_debug("%s: starting maintenance", db->name); if (db->has_maint_operations) run_op_list(db); else run_test_version(db); break; case PGS_RESULT_OK: if (PQresultStatus(res) != PGRES_TUPLES_OK) { close_maint(db, 20); return; } switch (db->maint_state) { case DB_MAINT_TEST_VERSION: if (has_ops(res)) { db->has_maint_operations = true; run_op_list(db); } else { run_queue_list(db); } break; case DB_MAINT_LOAD_OPS: if (!fill_op_list(db, res)) goto mem_err; case DB_MAINT_OP: run_op(db, res); break; case DB_MAINT_LOAD_QUEUES: if (!fill_items(db, res)) goto mem_err; case DB_MAINT_ROT1: if (!strlist_empty(db->maint_item_list)) { run_rotate_part1(db); } else { run_rotate_part2(db); } break; case DB_MAINT_ROT2: run_vacuum_list(db); break; case DB_MAINT_VACUUM_LIST: if (!fill_items(db, res)) goto mem_err; case DB_MAINT_DO_VACUUM: if (!strlist_empty(db->maint_item_list)) { run_vacuum(db); } else { close_maint(db, cf.maint_period); } break; default: fatal("bad state"); } break; case PGS_TIMEOUT: log_debug("%s: maint timeout", db->name); if (!pgs_connection_valid(db->c_maint)) launch_maint(db); else run_queue_list(db); break; default: log_warning("%s: default reconnect", db->name); pgs_reconnect(db->c_maint, 60); } return; mem_err: if (db->maint_item_list) { strlist_free(db->maint_item_list); db->maint_item_list = NULL; } pgs_disconnect(db->c_maint); pgs_sleep(db->c_maint, 20); } void launch_maint(struct PgDatabase *db) { const char *cstr; log_debug("%s: launch_maint", db->name); if (!db->c_maint) { if (db->maint_item_list) { strlist_free(db->maint_item_list); db->maint_item_list = NULL; } cstr = make_connstr(db->name); db->c_maint = pgs_create(cstr, maint_handler, db); } if (!pgs_connection_valid(db->c_maint)) { 
pgs_connect(db->c_maint); } else { /* Already have a connection, what are we doing here */ log_error("%s: maint already initialized", db->name); return; } } skytools-3.2.6/sql/ticker/Makefile0000644000000000000000000000130012426435645014024 0ustar include ../../config.mak PG_INCDIR = $(shell $(PG_CONFIG) --includedir) PG_LIBDIR = $(shell $(PG_CONFIG) --libdir) bin_PROGRAMS = pgqd pgqd_SOURCES = pgqd.c maint.c ticker.c retry.c pgqd.h nodist_pgqd_SOURCES = pgqd.ini.h pgqd_CPPFLAGS = -I$(PG_INCDIR) pgqd_LDFLAGS = -L$(PG_LIBDIR) pgqd_LIBS = -lpq -lm pgqd_EMBED_LIBUSUAL = 1 USUAL_DIR = ../../lib AM_FEATURES = libusual EXTRA_DIST = pgqd.ini CLEANFILES = pgqd.ini.h include $(USUAL_DIR)/mk/antimake.mk pgqd.ini.h: pgqd.ini sed -e 's/.*/"&\\n"/' $< > $@ install: install-conf install-conf: mkdir -p '$(DESTDIR)$(docdir)/conf' $(INSTALL) -m 644 pgqd.ini '$(DESTDIR)$(docdir)/conf/pgqd.ini.templ' tags: ctags *.[ch] ../../lib/usual/*.[ch] skytools-3.2.6/sql/ticker/pgqd.c0000644000000000000000000002143112426435645013472 0ustar #include "pgqd.h" #include #include #include #include #include #include #include static void detect_dbs(void); static void recheck_dbs(void); static const char usage_str[] = "usage: pgq-ticker [switches] config.file\n" "Switches:\n" " -v Increase verbosity\n" " -q No output to console\n" " -d Daemonize\n" " -h Show help\n" " -V Show version\n" " --ini Show sample config file\n" " -s Stop - send SIGINT to running process\n" " -k Kill - send SIGTERM to running process\n" #ifdef SIGHUP " -r Reload - send SIGHUP to running process\n" #endif ""; static const char *sample_ini = #include "pgqd.ini.h" ; struct Config cf; struct Stats stats; static struct PgSocket *db_template; static STATLIST(database_list); static int got_sigint; #define CF_REL_BASE struct Config static const struct CfKey conf_params[] = { CF_ABS("logfile", CF_FILE, cf_logfile, 0, NULL), CF_REL("pidfile", CF_FILE, pidfile, 0, NULL), CF_REL("initial_database", CF_STR, initial_database, 0, 
"template1"), CF_REL("base_connstr", CF_STR, base_connstr, 0, ""), CF_REL("database_list", CF_STR, database_list, 0, NULL), CF_ABS("syslog", CF_INT, cf_syslog, 0, "1"), CF_ABS("syslog_ident", CF_STR, cf_syslog_ident, 0, "pgqd"), CF_ABS("syslog_facility", CF_STR, cf_syslog_facility, 0, "daemon"), CF_REL("check_period", CF_TIME_DOUBLE, check_period, 0, "60"), CF_REL("maint_period", CF_TIME_DOUBLE, maint_period, 0, "120"), CF_REL("retry_period", CF_TIME_DOUBLE, retry_period, 0, "30"), CF_REL("ticker_period", CF_TIME_DOUBLE, ticker_period, 0, "1"), CF_REL("stats_period", CF_TIME_DOUBLE, stats_period, 0, "30"), CF_REL("connection_lifetime", CF_TIME_DOUBLE, connection_lifetime, 0, "3600"), { NULL }, }; static const struct CfSect conf_sects[] = { { "pgqd", conf_params }, { NULL } }; static struct CfContext conf_info = { .sect_list = conf_sects, .base = &cf, }; static void load_config(void) { bool ok = cf_load_file(&conf_info, cf.config_file); if (!ok) fatal("failed to read config"); reset_logging(); } static void handle_sigterm(int sock, short flags, void *arg) { log_info("Got SIGTERM, fast exit"); /* pidfile cleanup happens via atexit() */ exit(1); } static void handle_sigint(int sock, short flags, void *arg) { log_info("Got SIGINT, shutting down"); /* notify main loop to exit */ got_sigint = 1; } static void handle_sighup(int sock, short flags, void *arg) { log_info("Got SIGHUP, re-reading config"); load_config(); recheck_dbs(); } static void signal_setup(void) { static struct event ev_sighup; static struct event ev_sigterm; static struct event ev_sigint; int err; #ifdef SIGPIPE sigset_t set; /* block SIGPIPE */ sigemptyset(&set); sigaddset(&set, SIGPIPE); err = sigprocmask(SIG_BLOCK, &set, NULL); if (err < 0) fatal_perror("sigprocmask"); #endif #ifdef SIGHUP /* catch signals */ signal_set(&ev_sighup, SIGHUP, handle_sighup, NULL); err = signal_add(&ev_sighup, NULL); if (err < 0) fatal_perror("signal_add"); #endif signal_set(&ev_sigterm, SIGTERM, handle_sigterm, NULL); 
err = signal_add(&ev_sigterm, NULL); if (err < 0) fatal_perror("signal_add"); signal_set(&ev_sigint, SIGINT, handle_sigint, NULL); err = signal_add(&ev_sigint, NULL); if (err < 0) fatal_perror("signal_add"); } const char *make_connstr(const char *dbname) { static char buf[512]; snprintf(buf, sizeof(buf), "%s dbname=%s ", cf.base_connstr, dbname); return buf; } static void launch_db(const char *dbname) { struct PgDatabase *db; struct List *elem; /* check of already exists */ statlist_for_each(elem, &database_list) { db = container_of(elem, struct PgDatabase, head); if (strcmp(db->name, dbname) == 0) { db->dropped = false; return; } } /* create new db entry */ db = calloc(1, sizeof(*db)); db->name = strdup(dbname); list_init(&db->head); statlist_init(&db->maint_op_list, "maint_op_list"); statlist_append(&database_list, &db->head); /* start working on it */ launch_ticker(db); } static void drop_db(struct PgDatabase *db, bool log) { if (log) log_info("Unregister database: %s", db->name); statlist_remove(&database_list, &db->head); pgs_free(db->c_ticker); pgs_free(db->c_maint); pgs_free(db->c_retry); free_maint(db); free(db->name); free(db); } static void detect_handler(struct PgSocket *sk, void *arg, enum PgEvent ev, PGresult *res) { int i; const char *s; struct List *el, *tmp; struct PgDatabase *db; switch (ev) { case PGS_CONNECT_OK: pgs_send_query_simple(sk, "select datname from pg_database" " where not datistemplate and datallowconn"); break; case PGS_RESULT_OK: /* tag old dbs as dead */ statlist_for_each(el, &database_list) { db = container_of(el, struct PgDatabase, head); db->dropped = true; } /* process new dbs */ for (i = 0; i < PQntuples(res); i++) { s = PQgetvalue(res, i, 0); launch_db(s); } /* drop old dbs */ statlist_for_each_safe(el, &database_list, tmp) { db = container_of(el, struct PgDatabase, head); if (db->dropped) drop_db(db, true); } pgs_disconnect(sk); pgs_sleep(sk, cf.check_period); break; case PGS_TIMEOUT: detect_dbs(); break; default: 
pgs_disconnect(sk); pgs_sleep(sk, cf.check_period); } } static void detect_dbs(void) { if (!db_template) { const char *cstr = make_connstr(cf.initial_database); db_template = pgs_create(cstr, detect_handler, NULL); } pgs_connect(db_template); } static bool launch_db_cb(void *arg, const char *db) { launch_db(db); return true; } static void recheck_dbs(void) { struct PgDatabase *db; struct List *el, *tmp; if (cf.database_list && cf.database_list[0]) { /* tag old dbs as dead */ statlist_for_each(el, &database_list) { db = container_of(el, struct PgDatabase, head); db->dropped = true; } /* process new ones */ if (!parse_word_list(cf.database_list, launch_db_cb, NULL)) { log_warning("database_list parsing failed: %s", strerror(errno)); return; } /* drop old ones */ statlist_for_each_safe(el, &database_list, tmp) { db = container_of(el, struct PgDatabase, head); if (db->dropped) drop_db(db, true); } /* done with template for the moment */ if (db_template) { pgs_free(db_template); db_template = NULL; } } else if (!db_template) { log_info("auto-detecting dbs ..."); detect_dbs(); } } static struct event stats_ev; static void stats_handler(int fd, short flags, void *arg) { struct timeval tv = { cf.stats_period, 0 }; log_info("{ticks: %d, maint: %d, retry: %d}", stats.n_ticks, stats.n_maint, stats.n_retry); memset(&stats, 0, sizeof(stats)); if (evtimer_add(&stats_ev, &tv) < 0) fatal_perror("evtimer_add"); } static void stats_setup(void) { struct timeval tv = { cf.stats_period, 0 }; evtimer_set(&stats_ev, stats_handler, NULL); if (evtimer_add(&stats_ev, &tv) < 0) fatal_perror("evtimer_add"); } static void cleanup(void) { struct PgDatabase *db; struct List *elem, *tmp; statlist_for_each_safe(elem, &database_list, tmp) { db = container_of(elem, struct PgDatabase, head); drop_db(db, false); } pgs_free(db_template); event_base_free(NULL); reset_logging(); } static void main_loop_once(void) { reset_time_cache(); if (event_loop(EVLOOP_ONCE) != 0) { log_error("event_loop failed: %s", 
strerror(errno)); } } int main(int argc, char *argv[]) { int c; bool daemon = false; int sig = 0; const char *signame = NULL; for (c = 1; c < argc; c++) { if (!strcmp(argv[c], "--ini")) { printf("%s", sample_ini); exit(0); } if (!strcmp(argv[c], "--help")) { printf(usage_str); exit(0); } } while ((c = getopt(argc, argv, "dqvhVrsk")) != -1) { switch (c) { case 'd': daemon = true; break; case 'v': cf_verbose++; break; case 'q': cf_quiet = 1; break; case 'h': printf(usage_str); return 0; #ifdef SIGHUP case 'r': sig = SIGHUP; signame = "SIGHUP"; break; #endif case 's': sig = SIGINT; signame = "SIGINT"; break; case 'k': sig = SIGTERM; signame = "SIGTERM"; break; default: printf("bad switch: "); printf(usage_str); return 1; } } if (optind + 1 != argc) { fprintf(stderr, "pgqd requires config file\n"); return 1; } cf.config_file = argv[optind]; load_config(); conf_info.loaded = true; if (sig) { if (!cf.pidfile || !cf.pidfile[0]) { fprintf(stderr, "No pidfile configured\n"); return 1; } if (signal_pidfile(cf.pidfile, sig)) fprintf(stderr, "%s sent\n", signame); else fprintf(stderr, "Old process is not running\n"); return 0; } log_info("Starting pgqd " PACKAGE_VERSION); daemonize(cf.pidfile, daemon); if (!event_init()) fatal("event_init failed"); signal_setup(); stats_setup(); recheck_dbs(); while (!got_sigint) main_loop_once(); cleanup(); return 0; } skytools-3.2.6/sql/ticker/retry.c0000644000000000000000000000275712426435645013716 0ustar #include "pgqd.h" static void close_retry(struct PgDatabase *db, double sleep_time) { log_debug("%s: close_retry, %f", db->name, sleep_time); pgs_reconnect(db->c_retry, sleep_time); } static void run_retry(struct PgDatabase *db) { const char *q = "select * from pgq.maint_retry_events()"; log_debug("%s: %s", db->name, q); pgs_send_query_simple(db->c_retry, q); } static void parse_retry(struct PgDatabase *db, PGresult *res) { if (PQntuples(res) == 1) { char *val = PQgetvalue(res, 0, 0); stats.n_retry += atoi(val); if (strcmp(val, "0") != 0) 
{ run_retry(db); return; } } close_retry(db, cf.retry_period); } static void retry_handler(struct PgSocket *s, void *arg, enum PgEvent ev, PGresult *res) { struct PgDatabase *db = arg; switch (ev) { case PGS_CONNECT_OK: log_debug("%s: starting retry event processing", db->name); run_retry(db); break; case PGS_RESULT_OK: if (PQresultStatus(res) != PGRES_TUPLES_OK) close_retry(db, 20); else parse_retry(db, res); break; case PGS_TIMEOUT: log_debug("%s: retry timeout", db->name); launch_retry(db); break; default: log_warning("%s: default reconnect", db->name); pgs_reconnect(db->c_retry, 30); } } void launch_retry(struct PgDatabase *db) { const char *cstr; if (db->c_retry) { log_debug("%s: retry already initialized", db->name); } else { log_debug("%s: launch_retry", db->name); cstr = make_connstr(db->name); db->c_retry = pgs_create(cstr, retry_handler, db); } pgs_connect(db->c_retry); } skytools-3.2.6/old/0000755000000000000000000000000012426435645011070 5ustar skytools-3.2.6/old/simple_serial_consumer.py0000755000000000000000000000473412426435645016220 0ustar #! 
/usr/bin/env python """simple serial consumer for skytools3 it consumes events from a predefined queue and feeds them to a sql statement Config template:: [simple_serial_consumer] job_name = descriptive_name_for_job src_db = dbname=sourcedb_test dst_db = dbname=destdb port=1234 host=dbhost.com username=guest password=secret pgq_queue_name = source_queue logfile = ~/log/%(job_name)s.log pidfile = ~/pid/%(job_name)s.pid dst_query = select 1 use_skylog = 0 """ """Config example:: Create a queue named "echo_queue" in a database (like "testdb") Register consumer "echo" to this queue Start the echo consumer with config file shown below (You may want to use -v to see, what will happen) From some other window, insert something into the queue: select pgq.insert_event('echo_queue','type','hello=world'); Enjoy the ride :) If dst_query is set to "select 1" then echo consumer becomes a sink consumer [simple_serial_consumer] job_name = echo src_db = dbname=testdb dst_db = dbname=testdb pgq_queue_name = echo_queue logfile = ~/log/%(job_name)s.log pidfile = ~/pid/%(job_name)s.pid dst_query = select * from pgq.insert_event('echo_queue', %%(pgq.ev_type)s, %%(pgq.ev_data)s) """ import sys, pgq, skytools skytools.sane_config = 1 class SimpleSerialConsumer(pgq.SerialConsumer): doc_string = __doc__ def __init__(self, args): pgq.SerialConsumer.__init__(self,"simple_serial_consumer","src_db","dst_db", args) self.dst_query = self.cf.get("dst_query") def process_remote_batch(self, db, batch_id, event_list, dst_db): curs = dst_db.cursor() for ev in event_list: payload = skytools.db_urldecode(ev.data) if payload is None: payload = {} payload['pgq.ev_type'] = ev.type payload['pgq.ev_data'] = ev.data payload['pgq.ev_id'] = ev.id payload['pgq.ev_time'] = ev.time payload['pgq.ev_extra1'] = ev.extra1 payload['pgq.ev_extra2'] = ev.extra2 payload['pgq.ev_extra3'] = ev.extra3 payload['pgq.ev_extra4'] = ev.extra4 self.log.debug(self.dst_query % payload) curs.execute(self.dst_query, payload) try: res = 
curs.fetchone() self.log.debug(res) except: pass if __name__ == '__main__': script = SimpleSerialConsumer(sys.argv[1:]) script.start() skytools-3.2.6/old/cube_dispatcher.txt0000644000000000000000000000722512426435645014763 0ustar = cube_dispatcher(1) = == NAME == cube_dispatcher - PgQ consumer that is used to write source records into partitoned tables == SYNOPSIS == cube_dispatcher.py [switches] config.ini == DESCRIPTION == cube_dispatcher is PgQ consumer that reads url encoded records from source queue and writes them into partitioned tables according to configuration file. Used to prepare data for business intelligence. Name of the table is read from producer field in event. Batch creation time is used for partitioning. All records created in same day will go into same table partion. If partiton does not exist cube dispatcer will create it according to template. Events are usually procuded by `pgq.logutriga()`. Logutriga adds all the data of the record into the event (also in case of updates and deletes). `cube_dispatcher` can be used in to modes: keep_all:: keeps all the data that comes in. If record is updated several times during one day then table partiton for that day will contain several instances of that record. keep_latest:: only last instance of each record is kept for each day. That also means that all tables must have primary keys so cube dispatcher can delete previous versions of records before inserting new data. == QUICK-START == Basic cube_dispatcher setup and usage can be summarized by the following steps: 1. pgq and logutriga must be installed in source databases. See pgqadm man page for details. target database must also have pgq_ext schema. 2. edit a cube_dispatcher configuration file, say cube_dispatcher_sample.ini 3. create source queue $ pgqadm.py ticker.ini create 4. create target database and parent tables in it. 5. launch cube dispatcher in daemon mode $ cube_dispatcher.py cube_dispatcher_sample.ini -d 6. 
start producing events (create logutriga trggers on tables) CREATE OR REPLACE TRIGGER trig_cube_replica AFTER INSERT OR UPDATE ON some_table FOR EACH ROW EXECUTE PROCEDURE pgq.logutriga('') == CONFIG == include::common.config.txt[] === Config options specific to `cube_dispatcher` === src_db:: Connect string for source database where the queue resides. dst_db:: Connect string for target database where the tables should be created. mode:: Operation mode for cube_dispatcher. Either `keep_all` or `keep_latest`. dateformat:: Optional parameter to specify how to suffix data tables. Default is `YYYY_MM_DD` which creates per-day tables. With `YYYY_MM` per-month tables can be created. If explicitly set empty, partitioning is disabled. part_template:: SQL fragment for table creation. Various magic replacements are done there: _PKEY:: comma separated list of primery key columns. _PARENT:: schema-qualified parent table name. _DEST_TABLE:: schema-qualified partition table. _SCHEMA_TABLE:: same as _DEST_TABLE but dots replaced with "__", to allow use as index names. === Example config file === [cube_dispatcher] job_name = some_queue_to_cube src_db = dbname=sourcedb_test dst_db = dbname=dataminedb_test pgq_queue_name = udata.some_queue logfile = ~/log/%(job_name)s.log pidfile = ~/pid/%(job_name)s.pid # how many rows are kept: keep_latest, keep_all mode = keep_latest # to_char() fmt for table suffix #dateformat = YYYY_MM_DD # following disables table suffixes: #dateformat = part_template = create table _DEST_TABLE (like _PARENT); alter table only _DEST_TABLE add primary key (_PKEY); == LOGUTRIGA EVENT FORMAT == include::common.logutriga.txt[] == COMMAND LINE SWITCHES == include::common.switches.txt[] skytools-3.2.6/old/table_dispatcher.py0000755000000000000000000001131712426435645014745 0ustar #! /usr/bin/env python """It loads urlencoded rows for one trable from queue and inserts them into actual tables, with optional partitioning. 
--ini [table_dispatcher] job_name = test_move src_db = dbname=sourcedb_test dst_db = dbname=dataminedb_test pgq_queue_name = OrderLog logfile = ~/log/%(job_name)s.log pidfile = ~/pid/%(job_name)s.pid # where to put data. when partitioning, will be used as base name dest_table = orders # date field with will be used for partitioning # special value: _EVTIME - event creation time part_column = start_date #fields = * #fields = id, name #fields = id:newid, name, bar:baz # template used for creating partition tables # _DEST_TABLE part_template = create table _DEST_TABLE () inherits (orders); alter table only _DEST_TABLE add constraint _DEST_TABLE_pkey primary key (id); grant select on _DEST_TABLE to group reporting; """ import sys, os, pgq, skytools DEST_TABLE = "_DEST_TABLE" SCHEMA_TABLE = "_SCHEMA_TABLE" class TableDispatcher(pgq.SerialConsumer): """Single-table partitioner.""" def __init__(self, args): pgq.SerialConsumer.__init__(self, "table_dispatcher", "src_db", "dst_db", args) self.part_template = self.cf.get("part_template", '') self.dest_table = self.cf.get("dest_table") self.part_field = self.cf.get("part_field", '') self.part_method = self.cf.get("part_method", 'daily') if self.part_method not in ('daily', 'monthly'): raise Exception('bad part_method') if self.cf.get("fields", "*") == "*": self.field_map = None else: self.field_map = {} for fval in self.cf.getlist('fields'): tmp = fval.split(':') if len(tmp) == 1: self.field_map[tmp[0]] = tmp[0] else: self.field_map[tmp[0]] = tmp[1] def process_remote_batch(self, src_db, batch_id, ev_list, dst_db): # actual processing self.dispatch(dst_db, ev_list) def dispatch(self, dst_db, ev_list): """Generic dispatcher.""" # load data tables = {} for ev in ev_list: row = skytools.db_urldecode(ev.data) # guess dest table if self.part_field: if self.part_field == "_EVTIME": partval = str(ev.creation_date) else: partval = str(row[self.part_field]) partval = partval.split(' ')[0] date = partval.split('-') if self.part_method 
== 'monthly': date = date[:2] suffix = '_'.join(date) tbl = "%s_%s" % (self.dest_table, suffix) else: tbl = self.dest_table # map fields if self.field_map is None: dstrow = row else: dstrow = {} for k, v in self.field_map.items(): dstrow[v] = row[k] # add row into table if not tbl in tables: tables[tbl] = [dstrow] else: tables[tbl].append(dstrow) # create tables if needed self.check_tables(dst_db, tables) # insert into data tables curs = dst_db.cursor() for tbl, tbl_rows in tables.items(): skytools.magic_insert(curs, tbl, tbl_rows) def check_tables(self, dcon, tables): """Checks that tables needed for copy are there. If not then creates them. Used by other procedures to ensure that table is there before they start inserting. The commits should not be dangerous, as we haven't done anything with cdr's yet, so they should still be in one TX. Although it would be nicer to have a lock for table creation. """ dcur = dcon.cursor() for tbl in tables.keys(): if not skytools.exists_table(dcur, tbl): if not self.part_template: raise Exception('Dest table does not exists and no way to create it.') sql = self.part_template sql = sql.replace(DEST_TABLE, skytools.quote_fqident(tbl)) # we do this to make sure that constraints for # tables who contain a schema will still work schema_table = tbl.replace(".", "__") sql = sql.replace(SCHEMA_TABLE, skytools.quote_ident(schema_table)) dcur.execute(sql) dcon.commit() self.log.info('%s: Created table %s' % (self.job_name, tbl)) if __name__ == '__main__': script = TableDispatcher(sys.argv[1:]) script.start() skytools-3.2.6/old/cube_dispatcher.py0000755000000000000000000001466312426435645014603 0ustar #! /usr/bin/env python """It accepts urlencoded rows for multiple tables from queue and insert them into actual tables, with partitioning on tick time. 
Config template:: [cube_dispatcher] job_name = cd_srcdb_queue_to_dstdb_dstcolo.ini src_db = dbname=sourcedb_test dst_db = dbname=dataminedb_test pgq_queue_name = udata.some_queue logfile = ~/log/%(job_name)s.log pidfile = ~/pid/%(job_name)s.pid # how many rows are kept: keep_latest, keep_all mode = keep_latest # to_char() fmt for table suffix #dateformat = YYYY_MM_DD # following disables table suffixes: #dateformat = part_template = create table _DEST_TABLE (like _PARENT); alter table only _DEST_TABLE add primary key (_PKEY); grant select on _DEST_TABLE to postgres; """ import sys, os, pgq, skytools DEF_CREATE = """ create table _DEST_TABLE (like _PARENT); alter table only _DEST_TABLE add primary key (_PKEY); """ class CubeDispatcher(pgq.SerialConsumer): __doc__ = __doc__ def __init__(self, args): pgq.SerialConsumer.__init__(self, "cube_dispatcher", "src_db", "dst_db", args) self.dateformat = self.cf.get('dateformat', 'YYYY_MM_DD') self.part_template = self.cf.get('part_template', DEF_CREATE) mode = self.cf.get('mode', 'keep_latest') if mode == 'keep_latest': self.keep_latest = 1 elif mode == 'keep_all': self.keep_latest = 0 else: self.log.fatal('wrong mode setting') sys.exit(1) def get_part_date(self, batch_id): if not self.dateformat: return None # fetch and format batch date src_db = self.get_database('src_db') curs = src_db.cursor() q = 'select to_char(batch_end, %s) from pgq.get_batch_info(%s)' curs.execute(q, [self.dateformat, batch_id]) src_db.commit() return curs.fetchone()[0] def process_remote_batch(self, src_db, batch_id, ev_list, dst_db): # actual processing self.dispatch(dst_db, ev_list, self.get_part_date(batch_id)) def dispatch(self, dst_db, ev_list, date_str): """Actual event processing.""" # get tables and sql tables = {} sql_list = [] for ev in ev_list: if date_str: tbl = "%s_%s" % (ev.extra1, date_str) else: tbl = ev.extra1 sql = self.make_sql(tbl, ev) sql_list.append(sql) if not tbl in tables: tables[tbl] = self.get_table_info(ev, tbl) # create 
tables if needed self.check_tables(dst_db, tables) # insert into data tables curs = dst_db.cursor() block = [] for sql in sql_list: self.log.debug(sql) block.append(sql) if len(block) > 100: curs.execute("\n".join(block)) block = [] if len(block) > 0: curs.execute("\n".join(block)) def get_table_info(self, ev, tbl): klist = [skytools.quote_ident(k) for k in ev.key_list.split(',')] inf = { 'parent': ev.extra1, 'table': tbl, 'key_list': ",".join(klist), } return inf def make_sql(self, tbl, ev): """Return SQL statement(s) for that event.""" # parse data data = skytools.db_urldecode(ev.data) # parse tbl info if ev.type.find(':') > 0: op, keys = ev.type.split(':') else: op = ev.type keys = ev.extra2 ev.key_list = keys key_list = keys.split(',') if self.keep_latest and len(key_list) == 0: raise Exception('No pkey on table %s' % tbl) # generate sql if op in ('I', 'U'): if self.keep_latest: sql = "%s %s" % (self.mk_delete_sql(tbl, key_list, data), self.mk_insert_sql(tbl, key_list, data)) else: sql = self.mk_insert_sql(tbl, key_list, data) elif op == "D": if not self.keep_latest: raise Exception('Delete op not supported if mode=keep_all') sql = self.mk_delete_sql(tbl, key_list, data) else: raise Exception('Unknown row op: %s' % op) return sql def mk_delete_sql(self, tbl, key_list, data): # generate delete command whe_list = [] for k in key_list: whe_list.append("%s = %s" % (skytools.quote_ident(k), skytools.quote_literal(data[k]))) whe_str = " and ".join(whe_list) return "delete from %s where %s;" % (skytools.quote_fqident(tbl), whe_str) def mk_insert_sql(self, tbl, key_list, data): # generate insert command col_list = [] val_list = [] for c, v in data.items(): col_list.append(skytools.quote_ident(c)) val_list.append(skytools.quote_literal(v)) col_str = ",".join(col_list) val_str = ",".join(val_list) return "insert into %s (%s) values (%s);" % ( skytools.quote_fqident(tbl), col_str, val_str) def check_tables(self, dcon, tables): """Checks that tables needed for copy are 
there. If not then creates them. Used by other procedures to ensure that table is there before they start inserting. The commits should not be dangerous, as we haven't done anything with cdr's yet, so they should still be in one TX. Although it would be nicer to have a lock for table creation. """ dcur = dcon.cursor() for tbl, inf in tables.items(): if skytools.exists_table(dcur, tbl): continue sql = self.part_template sql = sql.replace('_DEST_TABLE', skytools.quote_fqident(inf['table'])) sql = sql.replace('_PARENT', skytools.quote_fqident(inf['parent'])) sql = sql.replace('_PKEY', inf['key_list']) # be similar to table_dispatcher schema_table = inf['table'].replace(".", "__") sql = sql.replace('_SCHEMA_TABLE', skytools.quote_ident(schema_table)) dcur.execute(sql) dcon.commit() self.log.info('%s: Created table %s' % (self.job_name, tbl)) if __name__ == '__main__': script = CubeDispatcher(sys.argv[1:]) script.start() skytools-3.2.6/old/table_dispatcher.txt0000644000000000000000000000547112426435645015135 0ustar = table_dispatcher(1) = == NAME == table_dispatcher - PgQ consumer that is used to write source records into partitoned table. == SYNOPSIS == table_dispatcher.py [switches] config.ini == DESCRIPTION == table_dispatcher is PgQ consumer that reads url encoded records from source queue and writes them into partitioned tables according to configuration file. Used to partiton data. For example change log's that need to kept online only shortly can be written to daily tables and then dropped as they become irrelevant. Also allows to select which columns have to be written into target database Creates target tables according to configuration file as needed. == QUICK-START == Basic table_dispatcher setup and usage can be summarized by the following steps: 1. PgQ must be installed in source database. See pgqadm man page for details. Target database must have `pgq_ext` schema installed. 2. edit a table_dispatcher configuration file, say table_dispatcher_sample.ini 3. 
create source queue $ pgqadm.py ticker.ini create 4. launch table dispatcher in daemon mode $ table_dispatcher.py table_dispatcher_sample.ini -d 5. start producing events == CONFIG == include::common.config.txt[] === table_dispatcher parameters === src_db:: Source database. dst_db:: Target database. dest_table:: Where to put data. when partitioning, will be used as base name part_field:: date field with will be used for partitioning. part_template:: SQL code used to create partition tables. Various magic replacements are done there: _PKEY:: comma separated list of primery key columns. _PARENT:: schema-qualified parent table name. _DEST_TABLE:: schema-qualified partition table. _SCHEMA_TABLE:: same as _DEST_TABLE but dots replaced with "__", to allow use as index names. === Example config === [table_dispatcher] job_name = table_dispatcher_source_table_targetdb src_db = dbname=sourcedb dst_db = dbname=targetdb pgq_queue_name = sourceq logfile = log/%(job_name)s.log pidfile = pid/%(job_name)s.pid # where to put data. when partitioning, will be used as base name dest_table = orders # names of the fields that must be read from source records fields = id, order_date, customer_name # date field with will be used for partitioning part_field = order_date # template used for creating partition tables part_template = create table _DEST_TABLE () inherits (orders); alter table only _DEST_TABLE add constraint _DEST_TABLE_pkey primary key (id); grant select on _DEST_TABLE to group reporting; == COMMAND LINE SWITCHES == include::common.switches.txt[] == LOGUTRIGA EVENT FORMAT == include::common.logutriga.txt[] skytools-3.2.6/old/bulk_loader.py0000755000000000000000000003432012426435645013732 0ustar #! /usr/bin/env python """Bulkloader for slow databases (Bizgres). Idea is following: - Script reads from queue a batch of urlencoded row changes. Inserts/updates/deletes, maybe many per one row. - It creates 3 lists: ins_list, upd_list, del_list. 
If one row is changed several times, it keeps the latest. - Lists are processed in followin way: ins_list - COPY into main table upd_list - COPY into temp table, UPDATE from there del_list - COPY into temp table, DELETE from there - One side-effect is that total order of how rows appear changes, but per-row changes will be kept in order. The speedup from the COPY will happen only if the batches are large enough. So the ticks should happen only after couple of minutes. bl_sourcedb_queue_to_destdb.ini Config template:: [bulk_loader] # job name is optional when not given ini file name is used job_name = bl_sourcedb_queue_to_destdb src_db = dbname=sourcedb dst_db = dbname=destdb pgq_queue_name = source_queue use_skylog = 0 logfile = ~/log/%(job_name)s.log pidfile = ~/pid/%(job_name)s.pid # 0 - apply UPDATE as UPDATE # 1 - apply UPDATE as DELETE+INSERT # 2 - merge INSERT/UPDATE, do DELETE+INSERT load_method = 0 # no hurry loop_delay = 10 # table renaming # remap_tables = skypein_cdr_closed:skypein_cdr, tbl1:tbl2 """ import sys, os, pgq, skytools from skytools import quote_ident, quote_fqident ## several methods for applying data # update as update METH_CORRECT = 0 # update as delete/copy METH_DELETE = 1 # merge ins_list and upd_list, do delete/copy METH_MERGED = 2 # no good method for temp table check before 8.2 USE_LONGLIVED_TEMP_TABLES = False AVOID_BIZGRES_BUG = 1 def find_dist_fields(curs, fqtbl): if not skytools.exists_table(curs, "pg_catalog.mpp_distribution_policy"): return [] schema, name = fqtbl.split('.') q = "select a.attname"\ " from pg_class t, pg_namespace n, pg_attribute a,"\ " mpp_distribution_policy p"\ " where n.oid = t.relnamespace"\ " and p.localoid = t.oid"\ " and a.attrelid = t.oid"\ " and a.attnum = any(p.attrnums)"\ " and n.nspname = %s and t.relname = %s" curs.execute(q, [schema, name]) res = [] for row in curs.fetchall(): res.append(row[0]) return res def exists_temp_table(curs, tbl): # correct way, works only on 8.2 q = "select 1 from pg_class 
where relname = %s and relnamespace = pg_my_temp_schema()" # does not work with parallel case #q = """ #select 1 from pg_class t, pg_namespace n #where n.oid = t.relnamespace # and pg_table_is_visible(t.oid) # and has_schema_privilege(n.nspname, 'USAGE') # and has_table_privilege(n.nspname || '.' || t.relname, 'SELECT') # and substr(n.nspname, 1, 8) = 'pg_temp_' # and t.relname = %s; #""" curs.execute(q, [tbl]) tmp = curs.fetchall() return len(tmp) > 0 class TableCache: """Per-table data hander.""" def __init__(self, tbl): """Init per-batch table data cache.""" self.name = tbl self.ev_list = [] self.pkey_map = {} self.pkey_list = [] self.pkey_str = None self.col_list = None self.final_ins_list = [] self.final_upd_list = [] self.final_del_list = [] def add_event(self, ev): """Store new event.""" # op & data ev.op = ev.ev_type[0] ev.data = skytools.db_urldecode(ev.ev_data) # get pkey column names if self.pkey_str is None: if len(ev.ev_type) > 2: self.pkey_str = ev.ev_type.split(':')[1] else: self.pkey_str = ev.ev_extra2 if self.pkey_str: self.pkey_list = self.pkey_str.split(',') # get pkey value if self.pkey_str: pk_data = [] for k in self.pkey_list: pk_data.append(ev.data[k]) ev.pk_data = tuple(pk_data) elif ev.op == 'I': # fake pkey, just to get them spread out ev.pk_data = ev.id else: raise Exception('non-pk tables not supported: %s' % self.name) # get full column list, detect added columns if not self.col_list: self.col_list = ev.data.keys() elif self.col_list != ev.data.keys(): # ^ supposedly python guarantees same order in keys() # find new columns for c in ev.data.keys(): if c not in self.col_list: for oldev in self.ev_list: oldev.data[c] = None self.col_list = ev.data.keys() # add to list self.ev_list.append(ev) # keep all versions of row data if ev.pk_data in self.pkey_map: self.pkey_map[ev.pk_data].append(ev) else: self.pkey_map[ev.pk_data] = [ev] def finish(self): """Got all data, prepare for insertion.""" del_list = [] ins_list = [] upd_list = [] for 
ev_list in self.pkey_map.values(): # rewrite list of I/U/D events to # optional DELETE and optional INSERT/COPY command exists_before = -1 exists_after = 1 for ev in ev_list: if ev.op == "I": if exists_before < 0: exists_before = 0 exists_after = 1 elif ev.op == "U": if exists_before < 0: exists_before = 1 #exists_after = 1 # this shouldnt be needed elif ev.op == "D": if exists_before < 0: exists_before = 1 exists_after = 0 else: raise Exception('unknown event type: %s' % ev.op) # skip short-lived rows if exists_before == 0 and exists_after == 0: continue # take last event ev = ev_list[-1] # generate needed commands if exists_before and exists_after: upd_list.append(ev.data) elif exists_before: del_list.append(ev.data) elif exists_after: ins_list.append(ev.data) # reorder cols new_list = self.pkey_list[:] for k in self.col_list: if k not in self.pkey_list: new_list.append(k) self.col_list = new_list self.final_ins_list = ins_list self.final_upd_list = upd_list self.final_del_list = del_list class BulkLoader(pgq.SerialConsumer): __doc__ = __doc__ load_method = METH_CORRECT remap_tables = {} def __init__(self, args): pgq.SerialConsumer.__init__(self, "bulk_loader", "src_db", "dst_db", args) def reload(self): pgq.SerialConsumer.reload(self) self.load_method = self.cf.getint("load_method", METH_CORRECT) if self.load_method not in (METH_CORRECT,METH_DELETE,METH_MERGED): raise Exception("bad load_method") self.remap_tables = {} for mapelem in self.cf.getlist("remap_tables", ''): tmp = mapelem.split(':') tbl = tmp[0].strip() new = tmp[1].strip() self.remap_tables[tbl] = new def process_remote_batch(self, src_db, batch_id, ev_list, dst_db): """Content dispatcher.""" # add events to per-table caches tables = {} for ev in ev_list: tbl = ev.extra1 if not tbl in tables: tables[tbl] = TableCache(tbl) cache = tables[tbl] cache.add_event(ev) # then process them for tbl, cache in tables.items(): cache.finish() self.process_one_table(dst_db, tbl, cache) def process_one_table(self, 
dst_db, tbl, cache): del_list = cache.final_del_list ins_list = cache.final_ins_list upd_list = cache.final_upd_list col_list = cache.col_list real_update_count = len(upd_list) self.log.debug("process_one_table: %s (I/U/D = %d/%d/%d)" % ( tbl, len(ins_list), len(upd_list), len(del_list))) if tbl in self.remap_tables: old = tbl tbl = self.remap_tables[tbl] self.log.debug("Redirect %s to %s" % (old, tbl)) # hack to unbroke stuff if self.load_method == METH_MERGED: upd_list += ins_list ins_list = [] # check if interesting table curs = dst_db.cursor() if not skytools.exists_table(curs, tbl): self.log.warning("Ignoring events for table: %s" % tbl) return # fetch distribution fields dist_fields = find_dist_fields(curs, tbl) extra_fields = [] for fld in dist_fields: if fld not in cache.pkey_list: extra_fields.append(fld) self.log.debug("PKey fields: %s Extra fields: %s" % ( ",".join(cache.pkey_list), ",".join(extra_fields))) # create temp table temp = self.create_temp_table(curs, tbl) # where expr must have pkey and dist fields klist = [] for pk in cache.pkey_list + extra_fields: exp = "%s.%s = %s.%s" % (quote_fqident(tbl), quote_ident(pk), quote_fqident(temp), quote_ident(pk)) klist.append(exp) whe_expr = " and ".join(klist) # create del sql del_sql = "delete from only %s using %s where %s" % ( quote_fqident(tbl), quote_fqident(temp), whe_expr) # create update sql slist = [] key_fields = cache.pkey_list + extra_fields for col in cache.col_list: if col not in key_fields: exp = "%s = %s.%s" % (quote_ident(col), quote_fqident(temp), quote_ident(col)) slist.append(exp) upd_sql = "update only %s set %s from %s where %s" % ( quote_fqident(tbl), ", ".join(slist), quote_fqident(temp), whe_expr) # insert sql colstr = ",".join([quote_ident(c) for c in cache.col_list]) ins_sql = "insert into %s (%s) select %s from %s" % ( quote_fqident(tbl), colstr, colstr, quote_fqident(temp)) # process deleted rows if len(del_list) > 0: self.log.info("Deleting %d rows from %s" % (len(del_list), 
tbl)) # delete old rows q = "truncate %s" % quote_fqident(temp) self.log.debug(q) curs.execute(q) # copy rows self.log.debug("COPY %d rows into %s" % (len(del_list), temp)) skytools.magic_insert(curs, temp, del_list, col_list) # delete rows self.log.debug(del_sql) curs.execute(del_sql) self.log.debug("%s - %d" % (curs.statusmessage, curs.rowcount)) self.log.debug(curs.statusmessage) if len(del_list) != curs.rowcount: self.log.warning("Delete mismatch: expected=%s updated=%d" % (len(del_list), curs.rowcount)) # process updated rows if len(upd_list) > 0: self.log.info("Updating %d rows in %s" % (len(upd_list), tbl)) # delete old rows q = "truncate %s" % quote_fqident(temp) self.log.debug(q) curs.execute(q) # copy rows self.log.debug("COPY %d rows into %s" % (len(upd_list), temp)) skytools.magic_insert(curs, temp, upd_list, col_list) if self.load_method == METH_CORRECT: # update main table self.log.debug(upd_sql) curs.execute(upd_sql) self.log.debug(curs.statusmessage) # check count if len(upd_list) != curs.rowcount: self.log.warning("Update mismatch: expected=%s updated=%d" % (len(upd_list), curs.rowcount)) else: # delete from main table self.log.debug(del_sql) curs.execute(del_sql) self.log.debug(curs.statusmessage) # check count if real_update_count != curs.rowcount: self.log.warning("Update mismatch: expected=%s deleted=%d" % (real_update_count, curs.rowcount)) # insert into main table if AVOID_BIZGRES_BUG: # copy again, into main table self.log.debug("COPY %d rows into %s" % (len(upd_list), tbl)) skytools.magic_insert(curs, tbl, upd_list, col_list) else: # better way, but does not work due bizgres bug self.log.debug(ins_sql) curs.execute(ins_sql) self.log.debug(curs.statusmessage) # process new rows if len(ins_list) > 0: self.log.info("Inserting %d rows into %s" % (len(ins_list), tbl)) skytools.magic_insert(curs, tbl, ins_list, col_list) # delete remaining rows if USE_LONGLIVED_TEMP_TABLES: q = "truncate %s" % quote_fqident(temp) else: # fscking problems with 
long-lived temp tables q = "drop table %s" % quote_fqident(temp) self.log.debug(q) curs.execute(q) def create_temp_table(self, curs, tbl): # create temp table for loading tempname = tbl.replace('.', '_') + "_loadertmp" # check if exists if USE_LONGLIVED_TEMP_TABLES: if exists_temp_table(curs, tempname): self.log.debug("Using existing temp table %s" % tempname) return tempname # bizgres crashes on delete rows arg = "on commit delete rows" arg = "on commit preserve rows" # create temp table for loading q = "create temp table %s (like %s) %s" % ( quote_fqident(tempname), quote_fqident(tbl), arg) self.log.debug("Creating temp table: %s" % q) curs.execute(q) return tempname if __name__ == '__main__': script = BulkLoader(sys.argv[1:]) script.start() skytools-3.2.6/old/bulk_loader.txt0000644000000000000000000000535412426435645014123 0ustar = bulk_loader(1) = == NAME == bulk_loader - PgQ consumer that loads urlencoded records to slow databases == SYNOPSIS == bulk_loader.py [switches] config.ini == DESCRIPTION == bulk_loader is PgQ consumer that reads url encoded records from source queue and writes them into tables according to configuration file. It is targeted to slow databases that cannot handle applying each row as separate statement. Originally written for BizgresMPP/greenplumDB which have very high per-statement overhead, but can also be used to load regular PostgreSQL database that cannot manage regular replication. Behaviour properties: - reads urlencoded "logutriga" records. - does not do partitioning, but allows optionally redirect table events. - does not keep event order. - always loads data with COPY, either directly to main table (INSERTs) or to temp tables (UPDATE/COPY) then applies from there. Events are usually procuded by `pgq.logutriga()`. Logutriga adds all the data of the record into the event (also in case of updates and deletes). == QUICK-START == Basic bulk_loader setup and usage can be summarized by the following steps: 1. 
pgq and logutriga must be installed in source databases. See pgqadm man page for details. target database must also have pgq_ext schema. 2. edit a bulk_loader configuration file, say bulk_loader_sample.ini 3. create source queue $ pgqadm.py ticker.ini create 4. Tune source queue to have big batches: $ pgqadm.py ticker.ini config ticker_max_count="10000" ticker_max_lag="10 minutes" ticker_idle_period="10 minutes" 5. create target database and tables in it. 6. launch bulk_loader in daemon mode $ bulk_loader.py -d bulk_loader_sample.ini 7. start producing events (create logutriga trggers on tables) CREATE OR REPLACE TRIGGER trig_bulk_replica AFTER INSERT OR UPDATE ON some_table FOR EACH ROW EXECUTE PROCEDURE pgq.logutriga('') == CONFIG == include::common.config.txt[] === Config options specific to `bulk_loader` === src_db:: Connect string for source database where the queue resides. dst_db:: Connect string for target database where the tables should be created. remap_tables:: Optional parameter for table redirection. Contains comma-separated list of : pairs. Eg: `oldtable1:newtable1, oldtable2:newtable2`. load_method:: Optional parameter for load method selection. Available options: 0:: UPDATE as UPDATE from temp table. This is default. 1:: UPDATE as DELETE+COPY from temp table. 2:: merge INSERTs with UPDATEs, then do DELETE+COPY from temp table. 
== LOGUTRIGA EVENT FORMAT == include::common.logutriga.txt[] == COMMAND LINE SWITCHES == include::common.switches.txt[] skytools-3.2.6/NEWS0000644000000000000000000004273512426435645011024 0ustar 2014-11-05 - SkyTools 3.2.6 = Minor features = * pgq: added support for syslog facility * pgq.cascade.admin: allow drop-node of root = Fixes = * londiste.handlers.dispatch: improved handling of obsolete partitions * londiste.playback: skip execution of sql script when already executed * pgq.cascade.admin: fixed takeover issue "cannot subscribe to itself" * pgq: clean up pgq.consumer when consumer is unregistered * skytools.scripting: moved psycopg2 reference to actual script using it * skytools.timeutil: fixed for Python versions less than 2.7 * skytools.timeutil: fixed handling of Z in parse_iso_timestamp * walmgr: ignore .history files when removing PG_RECEIVEXLOG file = Cleanups = * scripts/data_maintainer.py: added log msg about script's normal shutdown * libusual: updated to the latest version (that was already 9 months old) * setup*.py: fixes; updated to point to correct licence * Improved docs and tests. 
2014-03-31 - SkyTools 3.2 - "Hit any user to continue" = Features = * scripts/data_maintainer.py: can use csv file as source * debian: support Postgres 9.3, 9.4 = Minor features = * londiste: added ignore_truncate handler option * londiste add-table: added lock_timeout option to avoid possible locking issues * scripts/data_maintainer.py: added dynamic throttling support (via sql) * skytools.scripting: added options to lower log level for exceptions * skytools.scripting: simple retriable execute (for sql) = Fixes = * londiste.create_partition: fixed to avoid deadlocks from concurrent workers * londiste.handlers.dispatch: fixed "ignore events aiming at obsolete partitions" bug * pgq.cascade.admin: do not allow subscribing to itself (same node) * pgq.consumer: fixed RetriableEvent and RetriableWalkerEvent * skytools.dbstruct: quote sequence names * skytools.set_tcp_keepalive: add compat with newer OSX, simplify, enable win32 * scripts/data_maintainer.py: fixed to survive null in stats column of sql_modify * sql/londiste: fixed londiste.table_info trigger issue * sql/pgq: fix retry-queue vs. newgrants * sql/pgq: tag pgq.batch_id_seq as dumpable (extension) = Cleanups = * londiste.drop_obsolete_partitions: drop tables in alphabetical order (that is from the oldest) * londiste.handlers: removed duplicate debug logging * londiste: moved config processing from dispatch to base handler * pgq.localconsumer: added file path check on config load * scripts/data_maintainer.py: clean-up * debian: include data_maintainer3 in package * Get rid of dictfetch*() usage, it's obsolete. 
2013-07-31 - SkyTools 3.1.5 - "Caution, Blind Man Driving" = Features = * walmgr: new command createslave and new option --synch-standby * londiste: Dispatch handler extended to support sharding * skytools.skylog: added UdpTNetStringsHandler = Minor features = * londiste add: --skip-non-existing option * londiste add-table: --skip-non-existing ignores tables not on provider * londiste add-table: --find-copy-node working with --create now * londiste resync: supports --find-copy-node and --copy-node options now * londiste resync: now checks if table is available on provider * londiste --wait-sync: show progress in absolute numbers * londiste.handlers.dispatch: added switch to ignore events aiming at obsolete (dropped) partitions * querybuilder: show list of missing arguments on KeyError * scripts/simple_local_consumer.py: added consumer_filter option * skytools.sockutil.set_tcp_keepalive: accept socket object directly = Fixes = * londiste copy: fix data filtering when parallel_copies > 1 * londiste.playback: support multiple -v options for workers * londiste.repair: made less verbose * pgq.Consumer: non-lazy loading should use RetriableEvent * pgq.logutriga: do not quote '-' * grantfu: 2-pass processing = Cleanups = * londiste: inform about no tables passed on cmd line (thus nothing to do) * doc: mention config option in create-* commands * parse_pgarray: allow None * londiste status: make node name easier to select * scripts/data_maintainer.py: added suffix "3" to service name * skytools.timeutil: make tests more robust * londiste: changed handler argument 'key' to 'hash_key' * londiste: moved hash_key logic from dispatch to part handler * londiste.handlers: renamed handler 'part' to 'shard' * pgq: Consumer class also need _make_event, for _load_batch_events_old() * pgq: move RetriableEvent to consumer.py 2013-04-17 - SkyTools 3.1.4 - "Boldly Going Nowhere" = Features = * londiste create-node: Creation data from config. 
The need to 2 connection string in command line was major usability problem in create-* commands. Now initial setup can be described in config. * newgrants: Londiste can read from provider using non-privileged user. * scripts/data_maintainer.py: Generic script for processing large data sets in small batches. = Minor features = * londiste status: significant speed up of getting status on big cascades * londiste: remote_extra_connstr config option that will be added to remote node connect strings. Needed when more than several cascaded scripts are running, with different privileges in local and remote node. * scriptmgr: User switching with sudo. Script sections can contain user= option that makes scriptmgr launch sudo to run script. * londiste compare: Added --count-only switch * BaseScript: skylog_locations config options * skytools.Config: New default variables: config_dir, config_file. * Parse & merge Postgres connect strings * skytools_upgrade: add force option * londiste: new drop_obsolete_partitions(2) function * londiste: added dispatch handler arg retention_period = Fixes = * londiste: Sync SQL keywords with 9.3git. * handers/dispatch.py: 'keep_latest' row_mode processes now deletes properly. * configure.ac: Check for -lrt - needed when building against libevent. * adminscripts: Make info commands not use pidfile. Otherwise they will not run if some writing admin command is running. * Londiste compare: It's now compatible with 8.2. * londiste.create_partition: Set owner to match parent table. * londiste.create_trigger: Fixed skip-trigger check compatibility with postgres 9.x. * londiste.create_trigger: Added check for after-trigger being overshadowed by skip-trigger. = Cleanups = * Refactor Consumer code. 
* Remove: pgqadm and related code, its unmaintained * Sweeping change to postpone log string formatting * docs: copy-condition removed from londiste3 man page * Various logging cleanups * Londiste: fix trigger generation when extra params that are add-table specific are used (introduced in commit 364ade9) * londiste: quote queue name in trigger args * londiste: actually execute the ENABLE/DISABLE RULE query in londiste.create_partition * londiste/handlers/dispatch: fixed issue with missing part_func description * londiste.handler: disable shortcut for specifying handler args together with its name 2012-12-21 - SkyTools 3.1.3 - "Chainsaw-wielding Toothfairy" = Features = * londiste resurrect: Sync queue contents on old dead root. * londiste node-status: Show info about local node. * londiste takeover --dead-root: search for node with latest batches. * londiste compare/repair: work with tables not available on immediate provider * londiste change-handler: change python handler and trigger on table = Minor features and cleanups = * Auto-upgrade Londiste 2 schema. * debian: /etc/skytools.ini is preconfigured for pgqd and londiste3 scripts. * Clean londiste/cascadeadmin --help text. * syncer: Clean up error messages. * syncer: Add missing throttling. * londiste handlers parameter validation. * Pure-Python fallback for hashtext module. = Fixes = * If pgq.unregister_consumer() is used on subconsumer, don't drop main consumer. * londiste add-table throws error if table already exists but with different handler. * syncer: throttle queries when waiting * Fix exception_hook crash if curs.query is None * Fix extension version mismatches which made them uninstallable. 2012-11-02 - SkyTools 3.1.2 - "Tank in a Tarball" = Features = * --find-copy-node switch to 'add-table' that instructs 'copy' to walk upwards until it finds node that has full table. Parts of the patch were in earlier, but they did not work on merge node properly. 
Now the walk code was moved to copy, instead being done during add-table to fix that. Compare and repair does not support (yet) such walking. = Minor Features = * Allow overrided options with --set to show up in --ini output * Londiste: try to show exact event info on failure. = Fixes = * walmgr: Move .walshipping.last update AFTER rsync. Otherwise rsync failure will not be noticed during "master sync". * londiste execute: when ddl was already applied, connection was left in wrong mode. Fix. * Fix compare expression - md5() was in wrong place. * Fix Londiste compare and repair to accept copy-condition from handler (Asko Oja) * londiste: don't filter EXECUTE or TRUNCATE events on merge node * qadmin: make "install pgq" also install pgq_coop * Fix 2.1 to 3.0 upgrade script, rename to pgq.upgrade_2.1_to_3.0.sql * Reorg of Londiste tests (Asko Oja) * Socket library autoconf (Tony Arkles) * pgq: quote internal table names * Bump pgq schema version to 3.1.2 2012-10-09 - SkyTools 3.1.1 - "Service Oriented Diapers" Queue_mover and queue_splitter now have service names ending with 3. Config files need to change: [queue_mover3], [queue_splitter3]. = Minor Features = * scriptmgr: option for specifying service type (-t / --type) * scriptmgr: status command can optionally be more selective * skytools_upgrade: "dry run" option (--not-really) * pgq.Consumer: log idle time, keepalive_stats tper * pgq.Consumer: keepalive_stats to force logging even when idle = Cleanups = * qadmin.py: "show version" command * qadmin.py: unified results displaying * skytools.scripting: added shutdown() to BaseScript * skytools.scripting: log config reload, updated comment. 
* skytools.__init__: added symbols for natsort module * qadmin.py: updated script version, added pgq version, amended messages * scripts: changed queue_mover and queue_splitter service_name to end with '3' = Fixes = * pgqd: make connections handling more robust * londiste compare/repair: when provider is branch, stop it's worker to get sync * walmgr: Don't complain if a nonexistent WAL file is encountered during cleanup. * pgq_node.demote_root: decrease lock level * londiste.local_remove_table: resture DDL if still attached to table * pgq_coop.next_batch: check and create subconsumers after locking main consumer * --sync-watermark: don't allow subtree wm get above upstream wm * londiste compare: calculate common column names * londiste compare: use md5() if comparing 8.3 with 8.4+ * Use python interpreter given by '--with-python=PYTHON' everywhere 2012-07-19 - SkyTools 3.1 - "Carnival-on-a-Stick" = Features = * londiste: Wait commands > londiste $ini add-table .. --wait-sync > londiste $ini wait-sync Wait until all tables are fully copied over. > londiste $ini wait-root Wait for next tick on root reaches local node. > londiste $ini wait-provider Wait for next tick on provider reaches local node. * londiste execute: support meta attributes in sql This allows execute ADD COLUMN from root, over whole cascade even when not all nodes contain the table. --*-- Local-Table: mytable ALTER TABLE mytable ADD COLUMN foo text; * Convert DB modules to extensions (9.1+). Now following modules are available as extensions: pgq, pgq_node, pgq_coop, pgq_ext, londiste. All the old files are kept in old locations to avoid any breakage. Known problems in Postgres extensions infrastructure (as of 9.1.4): - pg_dump crashes when extensions has sequence whose value needs to be dumped. Thus pgq.batch_id_seq cannot be made dumpable, thus it's value gets lost during dump+reload. - CREATE EXTENSION pgq_coop FROM unpackaged; + DROP EXTENSION pgq_coop; will not drop schema. 
Plain CREATE+DROP works fine. = Minor Features = * londiste.applied_execute: drop queue_name from pkey * pgq.LocalConsumer: consumer that tracks processed ticks in local file * simple_local_consumer: LocalConsumer that runs query for each event. * pgq.Consumer: 'table_filter' config param, filters on ev_extra1, where PgQ triggers put source table name. * londiste.periodic_maintenance: cleanup for applied_execute * pgq.maint_operations: include londiste.periodic_maintenance * skytools.exists_view(): new function * skytools.fileutil: new module, contains write_atomic() * londiste.handler: make encoding= param available to part & londiste handlers * debian: build modules for all installed server-dev versions = Fixes = * CascadedConsumer: re-initialize provider connection when location changes * pgq_node.drop_node: mention node name in info message * drop-node: move find_root before local drop * pgq.maint_rotate_tables: check txid sanity, fail if bad * sql_tokenizer: allow idents starting with underscore * BaseScript: write pidfile atomically to avoid corrupt pidfiles. * londiste replay: skip provider checks if copy_node is used * CascadedWorker: don't move watermark on source further than incoming batch. 
2012-05-30 - SkyTools 3.0.3 - "Biometric Authentication by Yodeling" = Features = * londiste copy: copy table from another node (add-table --copy-node=NODE) * londiste remove-table: drop sequence too * public.create_partition: move under londiste schema, it's now generic enough * londiste.create_partitions: Support copying rules from master table * handlers.dispatch: use londiste.create_partitions, use old func as fallback * walmgr: add option for init-slave to add password from file to .pgpass * walmgr: add command synch-standby = Fixes = * CascadedWorker: merge-leaf-to-branch needs to publish wm info * pgq_node.create_node: create branch nodes with disable_insert set * pgq.insert_event: ignore disable_insert if in 'replica' role * create_partition: public grant needs special casing * skytools.dbstruct: better rule name redirection * debian: add build-deps, force debian/control rebuild * pgq_node.unregister_location: do automatic unsubscribe * pgq_node.drop_node: drop queue data even if node_info is empty * londiste add-table: --expect-sync does not require table existance on provider = Cleanups = * skytools_upgrade: show old version before upgrade * CascadeAdmin: add node name prefix messages from db * handlers.dispatch: add comment about differences from part_func * londiste.find_table_oid: drop old version - 9.1+ panics when argument name changes * dbservice.get_record_list: do not use parse_pgarray on python lists = Win32 = * skytools.scripting: catch EINTR from sleep() * signal_pidfile: support sig=0 on win32 * skytools.scripting: detect if signal name exists = Schema version changes = * pgq_node.version(): 3.0.0.18 * londiste.version(): 3.0.0.16 2012-05-10 - SkyTools 3.0.2 - "Consumes 30% Less Brain Than Competing Products" = Features = * Draft implementation of fine-grained grants for SQL functions. See commit doc/sql-grants.txt for details. New roles are created during upgrade, but new grants need to be applied manually. 
* pgq.create_queue: instead pre-determined grants, copy grants for event tables from pgq.event_template. * simple_consumer.py script, for simply event processing by simply launching a SQL function for each event. * londiste.handlers.part: make hash function configurable * psycopgwrapper: allow name= argument to .cursor() = Fixes = * londiste: Always use TRUNCATE CASCADE * pgq.cascade.CascadedWorker: publish_local_wm() is missing a commit * walmgr: fix backup from slave (Sergey Burladyan) = Cleanups = * pgq.insert_event(): outer wrapper does not need to be secdef * source.cfg: minor improvements * sql/pgq: use terse verbosity in regtests 2012-04-18 - SkyTools 3.0.1 - "All The Snow You Can Eat" = Features = * skytools.config: new .getbytes() method to support human-readable sizes. = Fixes = * skytools.skylog: Wrap logger.handlers.SysLogHandler to work around broken BOM addition when logging unicode() strings. * skytools.skylog: Improve compatibility with direct logger module usage. * debian/skytools.ini: include in tgz * pgqd(libusual): ignore EINTR on close() * Better error message in londiste.handler.dispatch when part_field is NULL. * Bump internal version in pgq/pgq_coop/pgq_node schemas. Seems I forgot to do it for 3.0, probably because there were doc-only changes in SQL code. 2012-03-17 - SkyTools 3.0 - "Business Advantage" = Major New Features = * Cascaded queues * Londiste: Parallel copy * Londiste: EXECUTE * Londiste: handlers * QAdmin * pgqd skytools-3.2.6/autogen.sh0000755000000000000000000000005112426435645012307 0ustar #! /bin/sh ./lib/mk/std-autogen.sh lib skytools-3.2.6/misc/0000755000000000000000000000000012426435645011245 5ustar skytools-3.2.6/misc/docheck.sh0000755000000000000000000000055612426435645013212 0ustar #! 
/bin/sh PYTHONPATH=python:$PYTHONPATH export PYTHONPATH if test "$1" = ""; then for f in \ python/skytools/*.py \ python/pgq/*.py \ python/pgq/cascade/*.py \ python/londiste/*.py \ python/*.py \ scripts/*.py do pychecker --config misc/pychecker.rc "$f" done else for f in "$@"; do pychecker --config misc/pychecker.rc "$f" done fi skytools-3.2.6/misc/pychecker.rc0000644000000000000000000001125512426435645013554 0ustar # # .pycheckrc file for Skytools # # only warn about files passed on the command line only = 1 # the maximum number of warnings to be displayed limit = 50 # list of evil C extensions that crash the interpreter evil = [] # unused imports importUsed = 1 # unused imports from __init__.py packageImportUsed = 1 # module imports itself reimportSelf = 1 ignoreImportErrors = 0 # reimporting a module moduleImportErrors = 1 # module does import and from ... import mixImport = 1 # unused local variables, except tuples localVariablesUsed = 0 # all unused local variables, including tuples unusedLocalTuple = 0 # all unused class data members membersUsed = 0 # all unused module variables allVariablesUsed = 0 # unused private module variables privateVariableUsed = 1 # report each occurrence of global warnings reportAllGlobals = 1 # functions called with named arguments (like keywords) namedArgs = 0 # Attributes (members) must be defined in __init__() onlyCheckInitForMembers = True # Subclass.__init__() not defined initDefinedInSubclass = 0 # Baseclass.__init__() not called baseClassInitted = 1 # Subclass needs to override methods that only throw exceptions abstractClasses = 1 # Return None from __init__() returnNoneFromInit = 1 # unreachable code unreachableCode = 0 # a constant is used in a conditional statement constantConditions = 1 # 1 is used in a conditional statement (if 1: or while 1:) constant1 = 0 # check if iterating over a string stringIteration = 1 # check improper use of string.find() stringFind = 1 # Calling data members as functions callingAttribute = 0 
# class attribute does not exist classAttrExists = 1 # First argument to methods methodArgName = 'self' # First argument to classmethods classmethodArgNames = ['cls', 'klass'] # unused method/function arguments argumentsUsed = False # unused method/function variable arguments varArgumentsUsed = 1 # ignore if self is unused in methods ignoreSelfUnused = 0 # check if overridden methods have the same signature checkOverridenMethods = 1 # check if __special__ methods exist and have the correct signature checkSpecialMethods = 1 # check if function/class/method names are reused redefiningFunction = 1 # check if using unary positive (+) which is usually meaningless unaryPositive = 1 # check if modify (call method) on a parameter that has a default value modifyDefaultValue = 1 # check if variables are set to different types inconsistentTypes = True # check if unpacking a non-sequence unpackNonSequence = 1 # check if unpacking sequence with the wrong length unpackLength = 1 # check if raising or catching bad exceptions badExceptions = True # check if statement appears to have no effect noEffect = 1 # check if using (expr % 1), it has no effect on integers and strings modulo1 = 1 # check if using (expr is const-literal), doesn't always work on integers and strings isLiteral = 1 # check consistent return values checkReturnValues = 1 # check if using implict and explicit return values checkImplicitReturns = 1 # check that attributes of objects exist checkObjectAttrs = 1 # various warnings about incorrect usage of __slots__ slots = 1 # using properties with classic classes classicProperties = 1 # check if __slots__ is empty emptySlots = 1 # check if using integer division intDivide = 1 # check if local variable shadows a global shadows = 1 # check if a variable shadows a builtin shadowBuiltins = 0 # check if input() is used usesInput = 1 # check if the exec statement is used usesExec = True # ignore warnings from files under standard library ignoreStandardLibrary = 0 # ignore 
warnings from the list of modules blacklist = ['Tkinter', 'wxPython', 'gtk', 'GTK', 'GDK'] # ignore global variables not used if name is one of these values variablesToIgnore = ['__version__', '__warningregistry__', '__all__', '__credits__', '__test__', '__author__', '__email__', '__revision__', '__id__', '__copyright__', '__license__', '__date__'] # ignore unused locals/arguments if name is one of these values unusedNames = ['_', 'empty', 'unused', 'dummy'] # ignore use of deprecated modules/functions deprecated = False # maximum lines in a function maxLines = 200 # maximum branches in a function maxBranches = 30 # maximum returns in a function maxReturns = 20 # maximum # of arguments to a function maxArgs = 10 # maximum # of locals in a function maxLocals = 30 # maximum # of identifier references (Law of Demeter) maxReferences = 5 # no module doc strings noDocModule = True # no class doc strings noDocClass = False # no function/method doc strings noDocFunc = False # print internal checker parse structures printParse = 0 # turn on debugging for checker debug = 0 # turn off all output except warnings quiet = 1 skytools-3.2.6/misc/fixman.py0000755000000000000000000000042512426435645013105 0ustar #! 
/usr/bin/env python import sys,re # hacks to force empty lines into manpage ln1 = r"\1\2" xml = sys.stdin.read() xml = re.sub(r"(\s*)(\s*)( 70: print ln.strip() ln = "" print ln.strip() skytools-3.2.6/misc/extra.css0000644000000000000000000000025212426435645013101 0ustar /* extra.css: make code blocks more different */ div.literalblock { border: 1px solid silver; background: #f4f4f4; padding: 0.5em; } /* eof extra.css */ skytools-3.2.6/misc/Cindent0000755000000000000000000000054312426435645012561 0ustar #!/bin/sh PARAM="-npro -kr -i8 -ts8 -sob -l120 -ss -ncs -cp1" PARAM="-npro -kr -i8 -ts8 -nsob -l80 -ss -ncs -cp1 -il0" PARAM="-npro -kr -i8 -ts8 -nsob -hnl -l110 -ss -ncs -cp1 -il0" for t in Datum PgqTriggerEvent TriggerData uint8 uint32 uint64 \ StringInfo Oid TransactionId do PARAM="$PARAM -T $t" done echo indent $PARAM "$@" indent $PARAM "$@" skytools-3.2.6/misc/lint.rc0000644000000000000000000002234112426435645012543 0ustar # lint Python modules using external checkers. # # This is the main checker controling the other ones and the reports # generation. It is itself both a raw checker and an astng checker in order # to: # * handle message activation / deactivation at the module level # * handle some basic but necessary stats'data (number of classes, methods...) # [MASTER] # Specify a configuration file. #rcfile= # Python code to execute, usually for sys.path manipulation such as # pygtk.require(). #init-hook= # Profiled execution. profile=no # Add to the black list. It should be a base name, not a # path. You may set this option multiple times. ignore=CVS # Pickle collected data for later comparisons. persistent=yes # Set the cache size for astng objects. cache-size=500 # List of plugins (as comma separated values of python modules names) to load, # usually to register additional checkers. 
load-plugins= [MESSAGES CONTROL] #['metrics', 'design', 'format', 'similarities', 'variables', 'rpython', 'miscellaneous', 'imports', 'basic', 'classes', 'master', 'typecheck', 'exceptions', 'newstyle'] # Enable only checker(s) with the given id(s). This option conflicts with the # disable-checker option #enable-checker= # Enable all checker(s) except those with the given id(s). This option # conflicts with the enable-checker option #disable-checker=design # Enable all messages in the listed categories. #enable-msg-cat= # Disable all messages in the listed categories. #disable-msg-cat= # Enable the message(s) with the given id(s). #enable-msg= # Disable the message(s) with the given id(s). disable-msg=W0142,R0201,W0613,W0622,C0321,W0612,W0614,W0102,W0602,W0603,W0704,W0702,I0001,W0212,W0401,W0141,E1102,W0232,W0703,W0614,W0613,W0401 [REPORTS] # set the output format. Available formats are text, parseable, colorized, msvs # (visual studio) and html output-format=text # Include message's id in output include-ids=no # Put messages in a separate file for each module / package specified on the # command line instead of printing them on stdout. Reports (if any) will be # written in a file name "pylint_global.[txt|html]". files-output=no # Tells wether to display a full report or only the messages reports=no # Python expression which should return a note less than 10 (10 is the highest # note).You have access to the variables errors warning, statement which # respectivly contain the number of errors / warnings messages and the total # number of statements analyzed. This is used by the global evaluation report # (R0004). evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) # Add a comment according to your evaluation note. This is used by the global # evaluation report (R0004). comment=no # Enable the report(s) with the given id(s). #enable-report= # Disable the report(s) with the given id(s). 
#disable-report= # try to find bugs in the code using type inference # [TYPECHECK] # Tells wether missing members accessed in mixin class should be ignored. A # mixin class is detected if its name ends with "mixin" (case insensitive). ignore-mixin-members=yes # List of classes names for which member attributes should not be checked # (useful for classes with attributes dynamicaly set). ignored-classes=SQLObject # When zope mode is activated, consider the acquired-members option to ignore # access to some undefined attributes. zope=no # List of members which are usually get through zope's acquisition mecanism and # so shouldn't trigger E0201 when accessed (need zope=yes to be considered). acquired-members=REQUEST,acl_users,aq_parent # checks for # * unused variables / imports # * undefined variables # * redefinition of variable from builtins or from an outer scope # * use of variable before assigment # [VARIABLES] # Tells wether we should check for unused import in __init__ files. init-import=no # A regular expression matching names used for dummy variables (i.e. not used). dummy-variables-rgx=_|dummy # List of additional names supposed to be defined in builtins. Remember that # you should avoid to define new builtins when possible. 
additional-builtins= # checks for : # * doc strings # * modules / classes / functions / methods / arguments / variables name # * number of arguments, local variables, branchs, returns and statements in # functions, methods # * required module attributes # * dangerous default values as arguments # * redefinition of function / method / class # * uses of the global statement # [BASIC] # Required attributes for module, separated by a comma required-attributes= # Regular expression which should only match functions or classes name which do # not require a docstring #no-docstring-rgx=_.* no-docstring-rgx=.* # FIXME # Regular expression which should only match correct module names module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ # Regular expression which should only match correct module level names #const-rgx=(([A-Za-z_][A-Za-z0-9_]*)|(__.*__))$ const-rgx=.* # Regular expression which should only match correct class names #class-rgx=[A-Za-z_][_a-zA-Z0-9]+$ class-rgx=.* # Regular expression which should only match correct function names #function-rgx=[a-z_][a-z0-9_]*$ function-rgx=.* # Regular expression which should only match correct method names #method-rgx=[a-z_][a-z0-9_]*$ method-rgx=.* # Regular expression which should only match correct instance attribute names #attr-rgx=[a-z_][a-z0-9_]*$ attr-rgx=.* # Regular expression which should only match correct argument names #argument-rgx=[a-z_][a-z0-9_]*$ argument-rgx=.* # Regular expression which should only match correct variable names variable-rgx=[a-z_][a-z0-9_]*$ # Regular expression which should only match correct list comprehension / # generator expression variable names inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ # Good variable names which should always be accepted, separated by a comma good-names=i,j,k,ex,Run,_ # Bad variable names which should always be refused, separated by a comma bad-names=foo,bar,baz,toto,tutu,tata # List of builtins function names that should not be used, separated by a comma 
bad-functions=map,filter,apply,input # checks for : # * methods without self as first argument # * overridden methods signature # * access only to existant members via self # * attributes not defined in the __init__ method # * supported interfaces implementation # * unreachable code # [CLASSES] # List of interface methods to ignore, separated by a comma. This is used for # instance to not check methods defines in Zope's Interface base class. ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by # List of method names used to declare (i.e. assign) instance attributes. defining-attr-methods=__init__,__new__,setUp # checks for # * external modules dependencies # * relative / wildcard imports # * cyclic imports # * uses of deprecated modules # [IMPORTS] # Deprecated modules which should not be used, separated by a comma deprecated-modules=regsub,string,TERMIOS,Bastion,rexec # Create a graph of every (i.e. internal and external) dependencies in the # given file (report R0402 must not be disabled) import-graph= # Create a graph of external dependencies in the given file (report R0402 must # not be disabled) ext-import-graph= # Create a graph of internal dependencies in the given file (report R0402 must # not be disabled) int-import-graph= # checks for sign of poor/misdesign: # * number of methods, attributes, local variables... 
# * size, complexity of functions, methods # [DESIGN] # Maximum number of arguments for function / method max-args=10 # Maximum number of locals for function / method body max-locals=30 # Maximum number of return / yield for function / method body max-returns=10 # Maximum number of branch for function / method body max-branchs=30 # Maximum number of statements in function / method body max-statements=100 # Maximum number of parents for a class (see R0901). max-parents=7 # Maximum number of attributes for a class (see R0902). max-attributes=30 # Minimum number of public methods for a class (see R0903). min-public-methods=0 # Maximum number of public methods for a class (see R0904). max-public-methods=140 # checks for similarities and duplicated code. This computation may be # memory / CPU intensive, so you should disable it if you experiments some # problems. # [SIMILARITIES] # Minimum lines number of a similarity. min-similarity-lines=4 # Ignore comments when computing similarities. ignore-comments=yes # Ignore docstrings when computing similarities. ignore-docstrings=yes # checks for : # * unauthorized constructions # * strict indentation # * line length # * use of <> instead of != # [FORMAT] # Maximum number of characters on a single line. max-line-length=200 # Maximum number of lines in a module max-module-lines=1000 # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 # tab). indent-string=' ' # checks for: # * warning notes in the code like FIXME, XXX # * PEP 263: source code with non ascii character but no encoding declaration # [MISCELLANEOUS] # List of note tags to take in consideration, separated by a comma. notes=FIXME,XXX,TODO skytools-3.2.6/misc/checkver.sh0000755000000000000000000000143512426435645013401 0ustar #! 
/bin/sh err=0 for s in pgq pgq_node pgq_coop londiste pgq_ext; do code_hash=$(git log --raw -n 1 sql/$s/functions | head -1) fn="sql/$s/functions/$s.version.sql" ver_hash=$(git log --raw -n 1 "$fn" | head -1) test "${code_hash}" = "${ver_hash}" || echo "$s has code changes, needs new version" ver_func=$(sed -n "s/.*return *'\(.*\)';/\1/;T;p" $fn) ver_control=$(sed -n "s/default_version = '\(.*\)'/\1/;T;p" sql/$s/$s.control) ver_make=$(sed -n "s/EXT_VERSION = \(.*\)/\1/;T;p" sql/$s/Makefile) if test "${ver_func}|${ver_control}" = "${ver_make}|${ver_make}"; then echo "$s: $ver_control" else echo "$s: version mismatch" echo " Makefile: $ver_make" echo " version(): $ver_func" echo " control: $ver_control" err=1 fi done exit $err skytools-3.2.6/misc/pychecker.strict.rc0000644000000000000000000001135012426435645015057 0ustar # # .pycheckrc file for Skytools # # only warn about files passed on the command line only = 1 # the maximum number of warnings to be displayed limit = 50 # list of evil C extensions that crash the interpreter evil = [] # unused imports importUsed = 1 # unused imports from __init__.py packageImportUsed = 1 # module imports itself reimportSelf = 1 ignoreImportErrors = 0 # reimporting a module moduleImportErrors = 1 # module does import and from ... 
import mixImport = 0 # unused local variables, except tuples localVariablesUsed = 1 # all unused local variables, including tuples unusedLocalTuple = 0 # all unused class data members membersUsed = 0 # all unused module variables allVariablesUsed = 1 # unused private module variables privateVariableUsed = 1 # report each occurrence of global warnings reportAllGlobals = 1 # functions called with named arguments (like keywords) namedArgs = 0 # Attributes (members) must be defined in __init__() onlyCheckInitForMembers = True # Subclass.__init__() not defined initDefinedInSubclass = 0 # Baseclass.__init__() not called baseClassInitted = 1 # Subclass needs to override methods that only throw exceptions abstractClasses = 1 # Return None from __init__() returnNoneFromInit = 1 # unreachable code unreachableCode = 0 # a constant is used in a conditional statement constantConditions = 1 # 1 is used in a conditional statement (if 1: or while 1:) constant1 = 0 # check if iterating over a string stringIteration = 1 # check improper use of string.find() stringFind = 1 # Calling data members as functions callingAttribute = 0 # class attribute does not exist classAttrExists = 1 # First argument to methods methodArgName = 'self' # First argument to classmethods classmethodArgNames = ['cls', 'klass'] # unused method/function arguments argumentsUsed = False # unused method/function variable arguments varArgumentsUsed = 1 # ignore if self is unused in methods ignoreSelfUnused = 0 # check if overridden methods have the same signature checkOverridenMethods = 1 # check if __special__ methods exist and have the correct signature checkSpecialMethods = 1 # check if function/class/method names are reused redefiningFunction = 1 # check if using unary positive (+) which is usually meaningless unaryPositive = 1 # check if modify (call method) on a parameter that has a default value modifyDefaultValue = 1 # check if variables are set to different types inconsistentTypes = True # check if 
unpacking a non-sequence unpackNonSequence = 1 # check if unpacking sequence with the wrong length unpackLength = 1 # check if raising or catching bad exceptions badExceptions = False # DBScript wants to catch KeyboardException + SystemExit # check if statement appears to have no effect noEffect = 1 # check if using (expr % 1), it has no effect on integers and strings modulo1 = 1 # check if using (expr is const-literal), doesn't always work on integers and strings isLiteral = 1 # check consistent return values checkReturnValues = 1 # check if using implict and explicit return values checkImplicitReturns = 1 # check that attributes of objects exist checkObjectAttrs = 1 # various warnings about incorrect usage of __slots__ slots = 1 # using properties with classic classes classicProperties = 1 # check if __slots__ is empty emptySlots = 1 # check if using integer division intDivide = 1 # check if local variable shadows a global shadows = 1 # check if a variable shadows a builtin shadowBuiltins = 1 # check if input() is used usesInput = 1 # check if the exec statement is used usesExec = True # ignore warnings from files under standard library ignoreStandardLibrary = 0 # ignore warnings from the list of modules blacklist = ['Tkinter', 'wxPython', 'gtk', 'GTK', 'GDK'] # ignore global variables not used if name is one of these values variablesToIgnore = ['__version__', '__warningregistry__', '__all__', '__credits__', '__test__', '__author__', '__email__', '__revision__', '__id__', '__copyright__', '__license__', '__date__'] # ignore unused locals/arguments if name is one of these values unusedNames = ['_', 'empty', 'unused', 'dummy'] # ignore use of deprecated modules/functions deprecated = False # maximum lines in a function maxLines = 100 # maximum branches in a function maxBranches = 20 # maximum returns in a function maxReturns = 10 # maximum # of arguments to a function maxArgs = 10 # maximum # of locals in a function maxLocals = 20 # maximum # of identifier 
references (Law of Demeter) maxReferences = 5 # no module doc strings noDocModule = False # no class doc strings noDocClass = False # no function/method doc strings noDocFunc = False # print internal checker parse structures printParse = 0 # turn on debugging for checker debug = 0 # turn off all output except warnings quiet = 1 skytools-3.2.6/misc/getattrs.py0000755000000000000000000000020712426435645013456 0ustar #! /usr/bin/env python import sys buf = open(sys.argv[1], "r").read().lower() if buf.find("pgq consumer") >= 0: print "-a pgq" skytools-3.2.6/configure.ac0000644000000000000000000001004512426435645012600 0ustar dnl Process this file with autoconf to produce a configure script. AC_INIT(skytools, 3.2.6) AC_CONFIG_SRCDIR(python/londiste.py) AC_CONFIG_HEADER(lib/usual/config.h) AC_PREREQ([2.59]) dnl Find Python interpreter AC_ARG_WITH(python, [ --with-python=PYTHON name of the Python executable (default: python)], [ AC_MSG_CHECKING(for python) PYTHON=$withval AC_MSG_RESULT($PYTHON)], [ AC_PATH_PROGS(PYTHON, python) ]) test -n "$PYTHON" || AC_MSG_ERROR([Cannot continue without Python]) dnl Find PostgreSQL pg_config AC_ARG_WITH(pgconfig, [ --with-pgconfig=PG_CONFIG path to pg_config (default: pg_config)], [ AC_MSG_CHECKING(for pg_config) PG_CONFIG=$withval AC_MSG_RESULT($PG_CONFIG)], [ AC_PATH_PROGS(PG_CONFIG, pg_config) ]) test -n "$PG_CONFIG" || AC_MSG_ERROR([Cannot continue without pg_config]) dnl Find GNU make AC_MSG_CHECKING(for GNU make) if test ! 
-n "$MAKE"; then for a in make gmake gnumake; do if "$a" --version 2>&1 | grep GNU > /dev/null; then MAKE="$a" break fi done fi if test -n "$MAKE"; then AC_MSG_RESULT($MAKE) else AC_MSG_ERROR([GNU make is not found]) fi AC_SUBST(MAKE) dnl name suffix default_SUFFIX=3 SUFFIX="$default_SUFFIX" AC_MSG_CHECKING(whether to add suffix to script names) AC_ARG_WITH(suffix, [ --with-suffix[[=sfx]] common package suffix (default: 3)], [ if test "$withval" = "yes"; then SUFFIX="$default_SUFFIX" elif test "$withval" = "no"; then SUFFIX="" else SUFFIX="$withval" fi ]) if test "$SUFFIX" = ""; then AC_MSG_RESULT(no) else AC_MSG_RESULT([yes, "$SUFFIX"]) fi AC_SUBST(SUFFIX) dnl asciidoc >= 8.2 AC_ARG_WITH(asciidoc, [ --with-asciidoc[[=prog]] path to asciidoc 8.2 (default: asciidoc)], [ if test "$withval" = "yes"; then AC_CHECK_PROGS(ASCIIDOC, [$ASCIIDOC asciidoc]) test -n "$ASCIIDOC" || ASCIIDOC=no else AC_MSG_CHECKING(for asciidoc) ASCIIDOC=$withval AC_MSG_RESULT($ASCIIDOC) fi ], [ ASCIIDOC="no" ]) if test "$ASCIIDOC" != "no"; then AC_MSG_CHECKING([whether asciidoc version >= 8.2]) ver=`$ASCIIDOC --version 2>&1 | sed -e 's/asciidoc //'` case "$ver" in dnl hack to make possible to use [, ] in regex changequote({, })dnl [0-7].*|8.[01]|8.[01].*) changequote([, ])dnl AC_MSG_RESULT([$ver, too old]) ASCIIDOC="no" ;; *) AC_MSG_RESULT([$ver, ok]) ;; esac fi dnl Decide default skylog setting SKYLOG=0 AC_MSG_CHECKING(for skylog default setting) AC_ARG_WITH(skylog, [ --with-skylog set default value for skylog (default: off)], [ if test "$withval" = "yes"; then SKYLOG="1" elif test "$withval" = "no"; then SKYLOG="0" else SKYLOG="$withval" fi AC_MSG_RESULT([$SKYLOG])]) AC_MSG_RESULT([$SKYLOG]) AC_SUBST(SKYLOG) dnl Decide module subdir SK3_SUBDIR=0 AC_MSG_CHECKING([whether to install modules in /skytools-3.0/ subdir]) AC_ARG_WITH(sk3-subdir, AC_HELP_STRING([--with-sk3-subdir], [put modules into /skytools-3.0/ subdir]), [ if test "$withval" = "yes"; then SK3_SUBDIR="1" elif test "$withval" = 
"no"; then SK3_SUBDIR="0" else SK3_SUBDIR="$withval" fi AC_MSG_RESULT([$SK3_SUBDIR])]) AC_MSG_RESULT([$SK3_SUBDIR]) AC_SUBST(SK3_SUBDIR) dnl check for xmlto, but only if asciidoc is found if test "$ASCIIDOC" != "no"; then AC_CHECK_PROGS(XMLTO, [$XMLTO xmlto]) test -n "$XMLTO" || XMLTO=no else XMLTO="no" fi # when in .git tree, turn asciidoc/xmlto uncondicionally on if test -d .git; then if test "$ASCIIDOC" = "no"; then echo "*** Building from GIT requires asciidoc, enabling it ***" ASCIIDOC="asciidoc" fi if test "$XMLTO" = "no"; then echo "*** Building from GIT requires xmlto, enabling it ***" XMLTO="xmlto" fi fi AC_USUAL_PORT_CHECK AC_USUAL_PROGRAM_CHECK AC_USUAL_HEADER_CHECK AC_USUAL_TYPE_CHECK AC_USUAL_FUNCTION_CHECK dnl Postres headers on Solaris define incompat unsetenv without that AC_CHECK_FUNCS(unsetenv) dnl Optional use of libevent AC_SEARCH_LIBS(clock_gettime, rt) AC_USUAL_LIBEVENT_OPT dnl Needed on SmartOS (Solaris) AC_SEARCH_LIBS([socket],[socket]) AC_USUAL_DEBUG AC_USUAL_CASSERT dnl Write result AC_CONFIG_FILES([config.mak]) AC_OUTPUT skytools-3.2.6/AUTHORS0000644000000000000000000000130312426435645011357 0ustar Maintainer ---------- Marko Kreen Contributors ------------ Aleksei Plotnikov André Malo Andrew Dunstan Artyom Nosov Asko Oja Asko Tiidumaa Cédric Villemain Charles Duffy Devrim Gündüz Dimitri Fontaine Dmitriy V'jukov Doug Gorley Eero Oja Egon Valdmees Emiel van de Laar Erik Jones Glenn Davy Götz Lange Hannu Krosing Hans-Juergen Schoenig Jason Buberel Juta Vaks Kaarel Kitsemets Kristo Kaiv Luc Van Hoeylandt Lukáš Lalinský Marcin Stępnicki Mark Kirkwood Martin Otto Martin Pihlak Nico Mandery Petr Jelinek Pierre-Emmanuel André Priit Kustala Sasha Aliashkevich Sébastien Lardière Sergey Burladyan Sergey Konoplev Shoaib Mir Steve Singer Tarvi Pillessaar Tony Arkles Zoltán Böszörményi